Commit ff90f49

donelianc authored and amyeroberts committed
Add type hints for ViLT models (huggingface#18577)
* Add type hints for Vilt models
* Add missing return type for TokenClassification class
1 parent 261f480 commit ff90f49

File tree

1 file changed: +79 -79 lines changed


src/transformers/models/vilt/modeling_vilt.py

Lines changed: 79 additions & 79 deletions
@@ -17,7 +17,7 @@
 import collections.abc
 import math
 from dataclasses import dataclass
-from typing import List, Optional, Tuple
+from typing import List, Optional, Tuple, Union
 
 import torch
 import torch.utils.checkpoint
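
The only change outside the `forward` signatures is this `Union` import; it backs the new return annotations below, which record that every ViLT `forward` returns either a typed `ModelOutput` subclass or a plain tuple, depending on `return_dict`. As a minimal sketch of what the `ViltModel` annotation promises at runtime (not part of this diff; the `dandelin/vilt-b32-mlm` checkpoint and the dummy inputs are illustrative assumptions):

# Sketch (not part of this commit): with return_dict=True the model returns a
# BaseModelOutputWithPooling; with return_dict=False it returns a plain tuple.
import torch
from PIL import Image
from transformers import ViltProcessor, ViltModel
from transformers.modeling_outputs import BaseModelOutputWithPooling

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
model = ViltModel.from_pretrained("dandelin/vilt-b32-mlm")

image = Image.new("RGB", (384, 384))  # dummy image standing in for real input
inputs = processor(image, "a dummy caption", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs, return_dict=True)
assert isinstance(outputs, BaseModelOutputWithPooling)

with torch.no_grad():
    legacy = model(**inputs, return_dict=False)
assert isinstance(legacy, tuple)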
@@ -761,19 +761,19 @@ class PreTrainedModel
     @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        pixel_values=None,
-        pixel_mask=None,
-        head_mask=None,
-        inputs_embeds=None,
-        image_embeds=None,
-        image_token_type_idx=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        image_embeds: Optional[torch.FloatTensor] = None,
+        image_token_type_idx: Optional[int] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[BaseModelOutputWithPooling, Tuple[torch.FloatTensor]]:
         r"""
         Returns:
 
@@ -914,19 +914,19 @@ def set_output_embeddings(self, new_embeddings):
     @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        pixel_values=None,
-        pixel_mask=None,
-        head_mask=None,
-        inputs_embeds=None,
-        image_embeds=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        image_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
@@ -1088,19 +1088,19 @@ def __init__(self, config):
     @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        pixel_values=None,
-        pixel_mask=None,
-        head_mask=None,
-        inputs_embeds=None,
-        image_embeds=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        image_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
         r"""
         labels (`torch.FloatTensor` of shape `(batch_size, num_labels)`, *optional*):
             Labels for computing the visual question answering loss. This tensor must be either a one-hot encoding of
@@ -1193,19 +1193,19 @@ def __init__(self, config):
     @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        pixel_values=None,
-        pixel_mask=None,
-        head_mask=None,
-        inputs_embeds=None,
-        image_embeds=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        image_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
             Labels are currently not supported.
@@ -1299,19 +1299,19 @@ def __init__(self, config):
     @replace_return_docstrings(output_type=ViltForImagesAndTextClassificationOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        pixel_values=None,
-        pixel_mask=None,
-        head_mask=None,
-        inputs_embeds=None,
-        image_embeds=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        image_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[ViltForImagesAndTextClassificationOutput, Tuple[torch.FloatTensor]]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
             Binary classification labels.
@@ -1436,19 +1436,19 @@ def __init__(self, config):
     @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        pixel_values=None,
-        pixel_mask=None,
-        head_mask=None,
-        inputs_embeds=None,
-        image_embeds=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        image_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
             Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
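
The second bullet of the commit message refers to this hunk: `ViltForTokenClassification.forward` previously lacked a return annotation and now advertises `Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]` like its siblings. A minimal sketch of that contract (not part of this diff), using a randomly initialized model so no checkpoint download is needed; `num_labels=5` and the dummy inputs are arbitrary assumptions:

# Sketch (not part of this commit): the return type now advertised by
# ViltForTokenClassification.forward, observed on a randomly initialized model.
import torch
from transformers import ViltConfig, ViltForTokenClassification
from transformers.modeling_outputs import TokenClassifierOutput

config = ViltConfig(num_labels=5)  # arbitrary label count
model = ViltForTokenClassification(config)
model.eval()

input_ids = torch.tensor([[101, 7592, 102]])  # dummy token ids, shape (1, 3)
pixel_values = torch.rand(1, 3, config.image_size, config.image_size)

with torch.no_grad():
    out = model(input_ids=input_ids, pixel_values=pixel_values)

# return_dict defaults to config.use_return_dict (True here), so we get the
# dataclass branch of the Union; logits cover only the text tokens.
assert isinstance(out, TokenClassifierOutput)
print(out.logits.shape)  # torch.Size([1, 3, 5])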
