
Commit 1b7de41

Fix issue avoid-missing-comma found at https://codereview.doctor (#16768)
1 parent de8b06f commit 1b7de41

1 file changed: 2 additions, 2 deletions


tests/bert_japanese/test_tokenization_bert_japanese.py

@@ -173,7 +173,7 @@ def test_mecab_tokenizer_no_normalize(self):
         )

     def test_wordpiece_tokenizer(self):
-        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにけは", "こん", "にけは" "ばんは", "##こん", "##にけは", "##ばんは"]
+        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにけは", "こん", "にけは", "ばんは", "##こん", "##にけは", "##ばんは"]

         vocab = {}
         for (i, token) in enumerate(vocab_tokens):

@@ -246,7 +246,7 @@ def test_full_tokenizer(self):
         )

     def test_character_tokenizer(self):
-        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "γ‚“", "に", "け", "は", "ば", "δΈ–", "η•Œ" "、", "。"]
+        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "γ‚“", "に", "け", "は", "ば", "δΈ–", "η•Œ", "、", "。"]

         vocab = {}
         for (i, token) in enumerate(vocab_tokens):
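For context (an explanatory note, not part of the commit): the avoid-missing-comma warning exists because adjacent string literals in Python are implicitly concatenated, so a missing comma silently merges two vocabulary tokens into a single list element instead of raising an error. A minimal sketch of the effect:

```python
# Minimal illustration (not from the repository) of the bug this commit fixes:
# Python implicitly concatenates adjacent string literals, so a missing comma
# silently produces one merged token instead of two separate ones.
buggy = ["にけは" "ばんは"]   # missing comma -> single element "にけはばんは"
fixed = ["にけは", "ばんは"]  # comma restored -> two separate vocab tokens

assert buggy == ["にけはばんは"]
assert fixed == ["にけは", "ばんは"]
print(len(buggy), len(fixed))  # 1 2
```

In these tests, the merged literal means the vocabulary built from vocab_tokens would contain the combined string rather than the two intended entries, which is what the linter flagged.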
