|
17 | 17 | import collections.abc |
18 | 18 | import math |
19 | 19 | from dataclasses import dataclass |
20 | | -from typing import List, Optional, Tuple |
| 20 | +from typing import List, Optional, Tuple, Union |
21 | 21 |
|
22 | 22 | import torch |
23 | 23 | import torch.utils.checkpoint |
@@ -761,19 +761,19 @@ class PreTrainedModel |
761 | 761 | @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) |
762 | 762 | def forward( |
763 | 763 | self, |
764 | | - input_ids=None, |
765 | | - attention_mask=None, |
766 | | - token_type_ids=None, |
767 | | - pixel_values=None, |
768 | | - pixel_mask=None, |
769 | | - head_mask=None, |
770 | | - inputs_embeds=None, |
771 | | - image_embeds=None, |
772 | | - image_token_type_idx=None, |
773 | | - output_attentions=None, |
774 | | - output_hidden_states=None, |
775 | | - return_dict=None, |
776 | | - ): |
| 764 | + input_ids: Optional[torch.LongTensor] = None, |
| 765 | + attention_mask: Optional[torch.FloatTensor] = None, |
| 766 | + token_type_ids: Optional[torch.LongTensor] = None, |
| 767 | + pixel_values: Optional[torch.FloatTensor] = None, |
| 768 | + pixel_mask: Optional[torch.LongTensor] = None, |
| 769 | + head_mask: Optional[torch.FloatTensor] = None, |
| 770 | + inputs_embeds: Optional[torch.FloatTensor] = None, |
| 771 | + image_embeds: Optional[torch.FloatTensor] = None, |
| 772 | + image_token_type_idx: Optional[int] = None, |
| 773 | + output_attentions: Optional[bool] = None, |
| 774 | + output_hidden_states: Optional[bool] = None, |
| 775 | + return_dict: Optional[bool] = None, |
| 776 | + ) -> Union[BaseModelOutputWithPooling, Tuple[torch.FloatTensor]]: |
777 | 777 | r""" |
778 | 778 | Returns: |
779 | 779 |
|
@@ -914,19 +914,19 @@ def set_output_embeddings(self, new_embeddings): |
914 | 914 | @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) |
915 | 915 | def forward( |
916 | 916 | self, |
917 | | - input_ids=None, |
918 | | - attention_mask=None, |
919 | | - token_type_ids=None, |
920 | | - pixel_values=None, |
921 | | - pixel_mask=None, |
922 | | - head_mask=None, |
923 | | - inputs_embeds=None, |
924 | | - image_embeds=None, |
925 | | - labels=None, |
926 | | - output_attentions=None, |
927 | | - output_hidden_states=None, |
928 | | - return_dict=None, |
929 | | - ): |
| 917 | + input_ids: Optional[torch.LongTensor] = None, |
| 918 | + attention_mask: Optional[torch.FloatTensor] = None, |
| 919 | + token_type_ids: Optional[torch.LongTensor] = None, |
| 920 | + pixel_values: Optional[torch.FloatTensor] = None, |
| 921 | + pixel_mask: Optional[torch.LongTensor] = None, |
| 922 | + head_mask: Optional[torch.FloatTensor] = None, |
| 923 | + inputs_embeds: Optional[torch.FloatTensor] = None, |
| 924 | + image_embeds: Optional[torch.FloatTensor] = None, |
| 925 | + labels: Optional[torch.LongTensor] = None, |
| 926 | + output_attentions: Optional[bool] = None, |
| 927 | + output_hidden_states: Optional[bool] = None, |
| 928 | + return_dict: Optional[bool] = None, |
| 929 | + ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: |
930 | 930 | r""" |
931 | 931 | labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
932 | 932 | Labels for computing the masked language modeling loss. Indices should be in *[-100, 0, ..., |
@@ -1088,19 +1088,19 @@ def __init__(self, config): |
1088 | 1088 | @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) |
1089 | 1089 | def forward( |
1090 | 1090 | self, |
1091 | | - input_ids=None, |
1092 | | - attention_mask=None, |
1093 | | - token_type_ids=None, |
1094 | | - pixel_values=None, |
1095 | | - pixel_mask=None, |
1096 | | - head_mask=None, |
1097 | | - inputs_embeds=None, |
1098 | | - image_embeds=None, |
1099 | | - labels=None, |
1100 | | - output_attentions=None, |
1101 | | - output_hidden_states=None, |
1102 | | - return_dict=None, |
1103 | | - ): |
| 1091 | + input_ids: Optional[torch.LongTensor] = None, |
| 1092 | + attention_mask: Optional[torch.FloatTensor] = None, |
| 1093 | + token_type_ids: Optional[torch.LongTensor] = None, |
| 1094 | + pixel_values: Optional[torch.FloatTensor] = None, |
| 1095 | + pixel_mask: Optional[torch.LongTensor] = None, |
| 1096 | + head_mask: Optional[torch.FloatTensor] = None, |
| 1097 | + inputs_embeds: Optional[torch.FloatTensor] = None, |
| 1098 | + image_embeds: Optional[torch.FloatTensor] = None, |
| 1099 | + labels: Optional[torch.LongTensor] = None, |
| 1100 | + output_attentions: Optional[bool] = None, |
| 1101 | + output_hidden_states: Optional[bool] = None, |
| 1102 | + return_dict: Optional[bool] = None, |
| 1103 | + ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: |
1104 | 1104 | r""" |
1105 | 1105 | labels (`torch.FloatTensor` of shape `(batch_size, num_labels)`, *optional*): |
1106 | 1106 | Labels for computing the visual question answering loss. This tensor must be either a one-hot encoding of |
@@ -1193,19 +1193,19 @@ def __init__(self, config): |
1193 | 1193 | @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) |
1194 | 1194 | def forward( |
1195 | 1195 | self, |
1196 | | - input_ids=None, |
1197 | | - attention_mask=None, |
1198 | | - token_type_ids=None, |
1199 | | - pixel_values=None, |
1200 | | - pixel_mask=None, |
1201 | | - head_mask=None, |
1202 | | - inputs_embeds=None, |
1203 | | - image_embeds=None, |
1204 | | - labels=None, |
1205 | | - output_attentions=None, |
1206 | | - output_hidden_states=None, |
1207 | | - return_dict=None, |
1208 | | - ): |
| 1196 | + input_ids: Optional[torch.LongTensor] = None, |
| 1197 | + attention_mask: Optional[torch.FloatTensor] = None, |
| 1198 | + token_type_ids: Optional[torch.LongTensor] = None, |
| 1199 | + pixel_values: Optional[torch.FloatTensor] = None, |
| 1200 | + pixel_mask: Optional[torch.LongTensor] = None, |
| 1201 | + head_mask: Optional[torch.FloatTensor] = None, |
| 1202 | + inputs_embeds: Optional[torch.FloatTensor] = None, |
| 1203 | + image_embeds: Optional[torch.FloatTensor] = None, |
| 1204 | + labels: Optional[torch.LongTensor] = None, |
| 1205 | + output_attentions: Optional[bool] = None, |
| 1206 | + output_hidden_states: Optional[bool] = None, |
| 1207 | + return_dict: Optional[bool] = None, |
| 1208 | + ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: |
1209 | 1209 | r""" |
1210 | 1210 | labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
1211 | 1211 | Labels are currently not supported. |
@@ -1299,19 +1299,19 @@ def __init__(self, config): |
1299 | 1299 | @replace_return_docstrings(output_type=ViltForImagesAndTextClassificationOutput, config_class=_CONFIG_FOR_DOC) |
1300 | 1300 | def forward( |
1301 | 1301 | self, |
1302 | | - input_ids=None, |
1303 | | - attention_mask=None, |
1304 | | - token_type_ids=None, |
1305 | | - pixel_values=None, |
1306 | | - pixel_mask=None, |
1307 | | - head_mask=None, |
1308 | | - inputs_embeds=None, |
1309 | | - image_embeds=None, |
1310 | | - labels=None, |
1311 | | - output_attentions=None, |
1312 | | - output_hidden_states=None, |
1313 | | - return_dict=None, |
1314 | | - ): |
| 1302 | + input_ids: Optional[torch.LongTensor] = None, |
| 1303 | + attention_mask: Optional[torch.FloatTensor] = None, |
| 1304 | + token_type_ids: Optional[torch.LongTensor] = None, |
| 1305 | + pixel_values: Optional[torch.FloatTensor] = None, |
| 1306 | + pixel_mask: Optional[torch.LongTensor] = None, |
| 1307 | + head_mask: Optional[torch.FloatTensor] = None, |
| 1308 | + inputs_embeds: Optional[torch.FloatTensor] = None, |
| 1309 | + image_embeds: Optional[torch.FloatTensor] = None, |
| 1310 | + labels: Optional[torch.LongTensor] = None, |
| 1311 | + output_attentions: Optional[bool] = None, |
| 1312 | + output_hidden_states: Optional[bool] = None, |
| 1313 | + return_dict: Optional[bool] = None, |
| 1314 | + ) -> Union[ViltForImagesAndTextClassificationOutput, Tuple[torch.FloatTensor]]: |
1315 | 1315 | r""" |
1316 | 1316 | labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
1317 | 1317 | Binary classification labels. |
@@ -1436,19 +1436,19 @@ def __init__(self, config): |
1436 | 1436 | @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) |
1437 | 1437 | def forward( |
1438 | 1438 | self, |
1439 | | - input_ids=None, |
1440 | | - attention_mask=None, |
1441 | | - token_type_ids=None, |
1442 | | - pixel_values=None, |
1443 | | - pixel_mask=None, |
1444 | | - head_mask=None, |
1445 | | - inputs_embeds=None, |
1446 | | - image_embeds=None, |
1447 | | - labels=None, |
1448 | | - output_attentions=None, |
1449 | | - output_hidden_states=None, |
1450 | | - return_dict=None, |
1451 | | - ): |
| 1439 | + input_ids: Optional[torch.LongTensor] = None, |
| 1440 | + attention_mask: Optional[torch.FloatTensor] = None, |
| 1441 | + token_type_ids: Optional[torch.LongTensor] = None, |
| 1442 | + pixel_values: Optional[torch.FloatTensor] = None, |
| 1443 | + pixel_mask: Optional[torch.LongTensor] = None, |
| 1444 | + head_mask: Optional[torch.FloatTensor] = None, |
| 1445 | + inputs_embeds: Optional[torch.FloatTensor] = None, |
| 1446 | + image_embeds: Optional[torch.FloatTensor] = None, |
| 1447 | + labels: Optional[torch.LongTensor] = None, |
| 1448 | + output_attentions: Optional[bool] = None, |
| 1449 | + output_hidden_states: Optional[bool] = None, |
| 1450 | + return_dict: Optional[bool] = None, |
| 1451 | + ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: |
1452 | 1452 | r""" |
1453 | 1453 | labels (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*): |
1454 | 1454 | Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. |
|
0 commit comments