Commit 63517fd

[M2M100 doc] remove duplicate example (#17175)
* remove duplicate example
* remove code block
1 parent: 4a419d4

File tree

1 file changed (+2 / -17 lines)


src/transformers/models/m2m_100/modeling_m2m_100.py

Lines changed: 2 additions & 17 deletions
@@ -565,7 +565,7 @@ def _set_gradient_checkpointing(self, module, value=False):
 """
 
 M2M_100_GENERATION_EXAMPLE = r"""
-    Translation example::
+    Translation example:
 
     ```python
     >>> from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
@@ -1299,22 +1299,7 @@ def forward(
             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
 
         Returns:
-
-        Example:
-
-        ```python
-        >>> from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
-
-        >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
-        >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
-
-        >>> text_to_translate = "Life is like a box of chocolates"
-        >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")
-
-        >>> # translate to French
-        >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
-        >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
-        ```"""
+        """
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
         if labels is not None:
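
For reference, the duplicate example deleted above can be run on its own as a short script. This is a minimal sketch reconstructed from the removed docstring lines, assuming `transformers` (with `sentencepiece`) and `torch` are installed; the retained `M2M_100_GENERATION_EXAMPLE` docstring covers the same translation flow.

```python
# Reconstructed from the duplicate example deleted in this commit.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Load the many-to-many multilingual translation model and its tokenizer.
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

text_to_translate = "Life is like a box of chocolates"
model_inputs = tokenizer(text_to_translate, return_tensors="pt")

# Forcing the first generated token to the French language id makes the model translate into French.
gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
```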

0 commit comments
