
Commit 29de51f

Update vigogne template (#2580)
1 parent 7fbf5b1 commit 29de51f

File tree

2 files changed: +46 -38 lines changed


fastchat/conversation.py

Lines changed: 37 additions & 4 deletions
@@ -962,14 +962,32 @@ def get_conv_template(name: str) -> Conversation:
     )
 )
 
-# Vigogne Chat default template
+# Vigogne Instruct default template
 # source: https://github.com/bofenghuang/vigogne
 register_conv_template(
     Conversation(
-        name="vigogne-chat",
+        name="vigogne_instruct",
+        system_template="### System:\n{system_message}\n\n",
+        system_message=(
+            "Ci-dessous se trouve une instruction qui décrit une tâche à accomplir. Rédigez une réponse qui répond de manière"
+            " précise à la demande."
+        ),
+        roles=("### Instruction", "### Response"),
+        sep_style=SeparatorStyle.DOLLY,
+        sep="\n\n",
+        sep2="</s>",
+    )
+)
+
+# Vigogne Chat default template
+register_conv_template(
+    Conversation(
+        name="vigogne_chat_v2",
         system_template="<|system|>: {system_message}",
-        system_message="Vous êtes l'assistant IA nommé Vigogne, créé par Zaion Lab (https://zaion.ai). "
-        "Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.",
+        system_message=(
+            "Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
+            " autant que vous le pouvez."
+        ),
         roles=("<|user|>", "<|assistant|>"),
         sep_style=SeparatorStyle.ADD_COLON_TWO,
         sep="\n",
@@ -978,6 +996,21 @@ def get_conv_template(name: str) -> Conversation:
     )
 )
 
+register_conv_template(
+    Conversation(
+        name="vigogne_chat_v3",
+        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
+        system_message=(
+            "Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
+            " autant que vous le pouvez."
+        ),
+        roles=("[INST]", "[/INST]"),
+        sep_style=SeparatorStyle.LLAMA2,
+        sep=" ",
+        sep2=" </s>",
+    )
+)
+
 # Falcon 180B chat template
 # source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
 register_conv_template(
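
For context, a minimal sketch (not part of the commit) of how one of the newly registered templates can be exercised through FastChat's conversation API; get_conv_template, append_message, roles, and get_prompt are existing helpers in fastchat/conversation.py, and the user message is made up:

from fastchat.conversation import get_conv_template

# Fetch a fresh copy of the template registered above under "vigogne_chat_v3".
conv = get_conv_template("vigogne_chat_v3")

# roles == ("[INST]", "[/INST]"); append one user turn and leave the
# assistant slot empty so get_prompt() ends where generation should start.
conv.append_message(conv.roles[0], "Hello, who are you?")
conv.append_message(conv.roles[1], None)

# Renders the system message inside the [INST] <<SYS>> ... <</SYS>> block,
# followed by the user turn, using SeparatorStyle.LLAMA2.
print(conv.get_prompt())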

fastchat/model/model_adapter.py

Lines changed: 9 additions & 34 deletions
@@ -1549,13 +1549,13 @@ def get_default_conv_template(self, model_path: str) -> Conversation:
         return get_conv_template("llama2-chinese")
 
 
-class VigogneInstructAdapter(BaseModelAdapter):
-    """The model adapter for Vigogne-Instruct (e.g., bofenghuang/vigogne-2-7b-instruct)"""
+class VigogneAdapter(BaseModelAdapter):
+    """The model adapter for vigogne (e.g., bofenghuang/vigogne-2-7b-chat)"""
 
     use_fast_tokenizer = False
 
     def match(self, model_path: str):
-        return "vigogne" in model_path.lower() and "instruct" in model_path.lower()
+        return bool(re.search(r"vigogne|vigostral", model_path, re.I))
 
     def load_model(self, model_path: str, from_pretrained_kwargs: dict):
         revision = from_pretrained_kwargs.get("revision", "main")
@@ -1574,35 +1574,11 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict):
         return model, tokenizer
 
     def get_default_conv_template(self, model_path: str) -> Conversation:
-        return get_conv_template("alpaca")
-
-
-class VigogneChatAdapter(BaseModelAdapter):
-    """The model adapter for Vigogne-Chat (e.g., bofenghuang/vigogne-7b-chat)"""
-
-    use_fast_tokenizer = False
-
-    def match(self, model_path: str):
-        return "vigogne" in model_path.lower() and "chat" in model_path.lower()
-
-    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
-        revision = from_pretrained_kwargs.get("revision", "main")
-        tokenizer = AutoTokenizer.from_pretrained(
-            model_path,
-            use_fast=self.use_fast_tokenizer,
-            trust_remote_code=True,
-            revision=revision,
-        )
-        model = AutoModelForCausalLM.from_pretrained(
-            model_path,
-            trust_remote_code=True,
-            low_cpu_mem_usage=True,
-            **from_pretrained_kwargs,
-        ).eval()
-        return model, tokenizer
-
-    def get_default_conv_template(self, model_path: str) -> Conversation:
-        return get_conv_template("vigogne-chat")
+        if "chat" in model_path.lower():
+            if "vigostral" in model_path.lower():
+                return get_conv_template("vigogne_chat_v3")
+            return get_conv_template("vigogne_chat_v2")
+        return get_conv_template("vigogne_instruct")
 
 
 class OpenLLaMaOpenInstructAdapter(BaseModelAdapter):
@@ -1746,8 +1722,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation:
 register_model_adapter(BGEAdapter)
 register_model_adapter(E5Adapter)
 register_model_adapter(Lamma2ChineseAdapter)
-register_model_adapter(VigogneInstructAdapter)
-register_model_adapter(VigogneChatAdapter)
+register_model_adapter(VigogneAdapter)
 register_model_adapter(OpenLLaMaOpenInstructAdapter)
 register_model_adapter(ReaLMAdapter)
 register_model_adapter(PhindCodeLlamaAdapter)
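
A minimal sketch (not from the commit) of the template routing the consolidated VigogneAdapter now performs; the standalone helper and the model paths below are only illustrative, mirroring the match() regex and get_default_conv_template() branching shown in the diff:

import re

def pick_vigogne_template(model_path: str) -> str:
    # Same matching rule as VigogneAdapter.match().
    if not re.search(r"vigogne|vigostral", model_path, re.I):
        raise ValueError(f"{model_path} is not handled by VigogneAdapter")
    # Same branching as the new get_default_conv_template().
    if "chat" in model_path.lower():
        if "vigostral" in model_path.lower():
            return "vigogne_chat_v3"  # Llama-2-style [INST] prompt
        return "vigogne_chat_v2"      # <|user|> / <|assistant|> prompt
    return "vigogne_instruct"         # instruction/response prompt

print(pick_vigogne_template("bofenghuang/vigogne-2-7b-instruct"))  # vigogne_instruct
print(pick_vigogne_template("bofenghuang/vigogne-2-7b-chat"))      # vigogne_chat_v2
print(pick_vigogne_template("vigostral-7b-chat"))                  # vigogne_chat_v3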
