Skip to content

Commit 01049f1

Browse files
author
Ricardo Rei
committed
Bug fix: forgot that some models were actually using sparsemax, replaced patch on hparams.yaml (#244)
1 parent 76f3931 commit 01049f1

File tree

2 files changed

+1
-10
lines changed

2 files changed

+1
-10
lines changed

comet/models/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -101,7 +101,7 @@ def load_from_checkpoint(
101101
import pkg_resources
102102
comet_version = pkg_resources.get_distribution("unbabel-comet").version
103103
use_softmax = (pkg_resources.parse_version(comet_version) >= pkg_resources.parse_version("2.2.4") and
104-
hparams.get("layer_transformation") == "sparsemax")
104+
hparams.get("layer_transformation") == "sparsemax_patch")
105105
except:
106106
use_softmax = False
107107

comet/modules/layerwise_attention.py

Lines changed: 0 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -48,15 +48,6 @@ def __init__(
4848

4949
self.transform_fn = torch.softmax
5050
if layer_transformation == "sparsemax":
51-
# Import warnings module
52-
import warnings
53-
# Display warning message
54-
warnings.warn(
55-
"WARNING - sparsemax is DEPRECATED in favor of softmax. "
56-
"Please use softmax instead. ",
57-
DeprecationWarning,
58-
stacklevel=2
59-
)
6051
from entmax import sparsemax
6152
self.transform_fn = sparsemax
6253

0 commit comments

Comments (0)