We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 993ec4d commit 76f3931 — Copy full SHA for 76f3931
comet/modules/layerwise_attention.py
@@ -48,8 +48,16 @@ def __init__(
48
49
self.transform_fn = torch.softmax
50
if layer_transformation == "sparsemax":
51
+ # Import warnings module
52
+ import warnings
53
+ # Display warning message
54
+ warnings.warn(
55
+ "WARNING - sparsemax is DEPRECATED in favor of softmax. "
56
+ "Please use softmax instead. ",
57
+ DeprecationWarning,
58
+ stacklevel=2
59
+ )
60
from entmax import sparsemax
-
61
self.transform_fn = sparsemax
62
63
if layer_weights is None:
0 commit comments