
Commit 7152ed2

Result of new doc style with fixes (huggingface#17015)
* Result of new doc style with fixes
* Add last two files
* Bump hf-doc-builder
1 parent 18df440 commit 7152ed2

28 files changed: +58 -58 lines
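All of the documentation changes in this commit make the same adjustment: when a doctest example spans several lines, the closing line of the statement now uses the `...` continuation prompt instead of repeating `>>>`. doctest treats every `>>>` as the start of a new statement, so a closing line written as `>>> ).input_ids` is parsed on its own and fails. A minimal sketch of the convention (the helper function below is hypothetical and not part of this commit):

```python
import doctest


def join_example():
    """Hypothetical doctest illustrating the prompt convention.

    Continuation lines of a multi-line statement, including the closing
    parenthesis, keep the `...` prompt so doctest reads one statement:

    >>> summary = " ".join(
    ...     ["This", "is", "a", "short", "summary"]
    ... )  # closing line uses `...`, not `>>>`
    >>> summary
    'This is a short summary'
    """


if __name__ == "__main__":
    # Runs the docstring example above; changing the closing line back to
    # `>>> )` would make doctest parse it as a separate statement and fail.
    doctest.testmod(verbose=False)
```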

docs/source/en/model_doc/bert-generation.mdx

Lines changed: 2 additions & 2 deletions
@@ -49,7 +49,7 @@ Usage:

 >>> input_ids = tokenizer(
 ... "This is a long article to summarize", add_special_tokens=False, return_tensors="pt"
->>> ).input_ids
+... ).input_ids
 >>> labels = tokenizer("This is a short summary", return_tensors="pt").input_ids

 >>> # train...
@@ -67,7 +67,7 @@ Usage:

 >>> input_ids = tokenizer(
 ... "This is the first sentence. This is the second sentence.", add_special_tokens=False, return_tensors="pt"
->>> ).input_ids
+... ).input_ids

 >>> outputs = sentence_fuser.generate(input_ids)

docs/source/en/model_doc/luke.mdx

Lines changed: 1 addition & 1 deletion
@@ -97,7 +97,7 @@ Example:
 >>> entities = [
 ... "Beyoncé",
 ... "Los Angeles",
->>> ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
+... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
 >>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
 >>> inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
 >>> outputs = model(**inputs)

setup.py

Lines changed: 1 addition & 1 deletion
@@ -111,7 +111,7 @@
 "ftfy",
 "fugashi>=1.0",
 "GitPython<3.1.19",
-"hf-doc-builder>=0.2.0",
+"hf-doc-builder>=0.3.0",
 "huggingface-hub>=0.1.0,<1.0",
 "importlib_metadata",
 "ipadic>=1.0.0,<2.0",

src/transformers/dependency_versions_table.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 "ftfy": "ftfy",
 "fugashi": "fugashi>=1.0",
 "GitPython": "GitPython<3.1.19",
-"hf-doc-builder": "hf-doc-builder>=0.2.0",
+"hf-doc-builder": "hf-doc-builder>=0.3.0",
 "huggingface-hub": "huggingface-hub>=0.1.0,<1.0",
 "importlib_metadata": "importlib_metadata",
 "ipadic": "ipadic>=1.0.0,<2.0",

src/transformers/models/encoder_decoder/modeling_encoder_decoder.py

Lines changed: 1 addition & 1 deletion
@@ -457,7 +457,7 @@ def forward(
 >>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
 >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
 ... "bert-base-uncased", "bert-base-uncased"
->>> ) # initialize Bert2Bert from pre-trained checkpoints
+... ) # initialize Bert2Bert from pre-trained checkpoints

 >>> # training
 >>> model.config.decoder_start_token_id = tokenizer.cls_token_id

src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py

Lines changed: 1 addition & 1 deletion
@@ -528,7 +528,7 @@ def call(
 >>> # forward
 >>> input_ids = tokenizer.encode(
 ... "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
->>> ) # Batch size 1
+... ) # Batch size 1
 >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)

 >>> # training

src/transformers/models/gpt2/modeling_tf_gpt2.py

Lines changed: 1 addition & 1 deletion
@@ -1061,7 +1061,7 @@ def call(

 >>> embedding_layer = model.resize_token_embeddings(
 ... len(tokenizer)
->>> ) # Update the model embeddings with the new vocabulary size
+... ) # Update the model embeddings with the new vocabulary size

 >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
 >>> encoded_choices = [tokenizer.encode(s) for s in choices]

src/transformers/models/imagegpt/modeling_imagegpt.py

Lines changed: 1 addition & 1 deletion
@@ -1000,7 +1000,7 @@ def forward(
 >>> samples = output[:, 1:].cpu().detach().numpy()
 >>> samples_img = [
 ... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [n_px, n_px, 3]).astype(np.uint8) for s in samples
->>> ] # convert color cluster tokens back to pixels
+... ] # convert color cluster tokens back to pixels
 >>> f, axes = plt.subplots(1, batch_size, dpi=300)

 >>> for img, ax in zip(samples_img, axes):

src/transformers/models/longformer/modeling_longformer.py

Lines changed: 4 additions & 4 deletions
@@ -1634,18 +1634,18 @@ def forward(

 >>> attention_mask = torch.ones(
 ... input_ids.shape, dtype=torch.long, device=input_ids.device
->>> ) # initialize to local attention
+... ) # initialize to local attention
 >>> global_attention_mask = torch.zeros(
 ... input_ids.shape, dtype=torch.long, device=input_ids.device
->>> ) # initialize to global attention to be deactivated for all tokens
+... ) # initialize to global attention to be deactivated for all tokens
 >>> global_attention_mask[
 ... :,
 ... [
 ... 1,
 ... 4,
 ... 21,
 ... ],
->>> ] = 1 # Set global attention to random tokens for the sake of this example
+... ] = 1 # Set global attention to random tokens for the sake of this example
 >>> # Usually, set global attention based on the task. For example,
 >>> # classification: the <s> token
 >>> # QA: question tokens
@@ -2025,7 +2025,7 @@ def forward(
 >>> answer_tokens = all_tokens[torch.argmax(start_logits) : torch.argmax(end_logits) + 1]
 >>> answer = tokenizer.decode(
 ... tokenizer.convert_tokens_to_ids(answer_tokens)
->>> ) # remove space prepending space token
+... ) # remove space prepending space token
 ```"""
 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
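Beyond the prompt fix, the first Longformer hunk is a compact illustration of the masking pattern its example describes: a mask of ones turns on local (windowed) attention for every token, while a separate mask of zeros leaves global attention disabled until selected positions are set to 1. A self-contained sketch of just that tensor construction, with hypothetical token ids and assuming only that torch is installed:

```python
import torch

# Hypothetical token ids, batch size 1 (stand-in for a real tokenizer output).
input_ids = torch.tensor([[0, 11, 42, 7, 99, 3, 2]])

# 1 everywhere: every token attends locally within its sliding window.
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)

# 0 everywhere: global attention is off for all tokens by default.
global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device)

# Flip a few positions to 1 to give them global attention; in practice the
# choice is task-dependent (e.g. the <s> token for classification, the
# question tokens for QA).
global_attention_mask[:, [1, 4]] = 1

print(attention_mask)
print(global_attention_mask)
```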

src/transformers/models/luke/modeling_luke.py

Lines changed: 3 additions & 3 deletions
@@ -953,11 +953,11 @@ def forward(
 >>> entities = [
 ... "Beyoncé",
 ... "Los Angeles",
->>> ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
+... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
 >>> entity_spans = [
 ... (0, 7),
 ... (17, 28),
->>> ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
+... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"

 >>> encoding = tokenizer(
 ... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt"
@@ -1435,7 +1435,7 @@ def forward(
 >>> entity_spans = [
 ... (0, 7),
 ... (17, 28),
->>> ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
+... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
 >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
 >>> outputs = model(**inputs)
 >>> logits = outputs.logits
