Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion docs/commands/webserver.md
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,16 @@ vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/temp
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js"></script>
```

2. Loading
2. Deprecation warnings
```
vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/deprecation.py
```

```
def check_deprecated_parameters(
```

3. Loading
```
vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-188ef5e8.js
```
Expand Down
4 changes: 2 additions & 2 deletions fastchat/llm_judge/qa_browser.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@

def display_question(category_selector, request: gr.Request):
choices = category_selector_map[category_selector]
return gr.Dropdown.update(
return gr.Dropdown(
value=choices[0],
choices=choices,
)
Expand Down Expand Up @@ -353,7 +353,7 @@ def build_single_answer_browser_tab():


def load_demo():
dropdown_update = gr.Dropdown.update(value=list(category_selector_map.keys())[0])
dropdown_update = gr.Dropdown(value=list(category_selector_map.keys())[0])
return dropdown_update, dropdown_update


Expand Down
8 changes: 7 additions & 1 deletion fastchat/model/model_registry.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ def get_model_info(name: str) -> ModelInfo:
],
"Vicuna",
"https://lmsys.org/blog/2023-03-30-vicuna/",
"a chat assistant fine-tuned from LLaMA on user-shared conversations by LMSYS",
"a chat assistant fine-tuned on user-shared conversations by LMSYS",
)
register_model_info(
["wizardlm-70b", "wizardlm-30b", "wizardlm-13b"],
Expand Down Expand Up @@ -312,3 +312,9 @@ def get_model_info(name: str) -> ModelInfo:
"https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1",
"a large language model by Mistral AI team",
)
register_model_info(
["deluxe-chat-v1"],
"DeluxeChat",
"",
"Deluxe Chat",
)
4 changes: 2 additions & 2 deletions fastchat/serve/gradio_block_arena_anony.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,8 @@ def load_demo_side_by_side_anony(models_, url_params):

states = (None,) * num_sides
selector_updates = (
gr.Markdown.update(visible=True),
gr.Markdown.update(visible=True),
gr.Markdown(visible=True),
gr.Markdown(visible=True),
)

return states + selector_updates
Expand Down
4 changes: 2 additions & 2 deletions fastchat/serve/gradio_block_arena_named.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,8 +58,8 @@ def load_demo_side_by_side_named(models, url_params):
model_right = model_left

selector_updates = (
gr.Dropdown.update(choices=models, value=model_left, visible=True),
gr.Dropdown.update(choices=models, value=model_right, visible=True),
gr.Dropdown(choices=models, value=model_left, visible=True),
gr.Dropdown(choices=models, value=model_right, visible=True),
)

return states + selector_updates
Expand Down
4 changes: 1 addition & 3 deletions fastchat/serve/gradio_web_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,9 +159,7 @@ def load_demo_single(models, url_params):
if model in models:
selected_model = model

dropdown_update = gr.Dropdown.update(
choices=models, value=selected_model, visible=True
)
dropdown_update = gr.Dropdown(choices=models, value=selected_model, visible=True)

state = None
return state, dropdown_update
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
"""Grant every pending user-access request for the lmsys/lmsys-chat-1m dataset.

One-off admin script: lists pending gated-dataset access requests via the
Hugging Face Hub API and approves each one. Requires a token with write
access to the dataset's gating settings.
"""
import requests

# NOTE(review): placeholder token hardcoded in source — replace "hf_XXX"
# (ideally read from an environment variable) before running.
headers = {"authorization": "Bearer hf_XXX"}

# Hoist both endpoint URLs out of the loop; the original rebound `url`
# on every iteration, shadowing the listing URL.
pending_url = (
    "https://huggingface.co/api/datasets/lmsys/lmsys-chat-1m"
    "/user-access-request/pending"
)
grant_url = (
    "https://huggingface.co/api/datasets/lmsys/lmsys-chat-1m"
    "/user-access-request/grant"
)

resp = requests.get(pending_url, headers=headers)
# Fail loudly (with the HTTP status) instead of crashing later on bad JSON.
resp.raise_for_status()

for entry in resp.json():
    user = entry["user"]["user"]
    ret = requests.post(grant_url, headers=headers, json={"user": user})
    print(user, ret.status_code)
    # Raise a real exception rather than `assert`, which is stripped
    # when Python runs with the -O flag.
    if ret.status_code != 200:
        raise RuntimeError(
            f"failed to grant access for {user}: HTTP {ret.status_code}"
        )
6 changes: 4 additions & 2 deletions fastchat/serve/monitor/monitor.py
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,8 @@ def build_leaderboard_tab(elo_results_file, leaderboard_table_file):
elem_id="leaderboard_dataframe",
)
gr.Markdown(
"If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model)."
"If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model).",
elem_id="leaderboard_markdown",
)
else:
pass
Expand All @@ -219,7 +220,8 @@ def build_leaderboard_tab(elo_results_file, leaderboard_table_file):
f"""## More Statistics for Chatbot Arena\n
We added some additional figures to show more statistics. The code for generating them is also included in this [notebook]({notebook_url}).
Please note that you may see different orders from different ranking methods. This is expected for models that perform similarly, as demonstrated by the confidence interval in the bootstrap figure. Going forward, we prefer the classical Elo calculation because of its scalability and interpretability. You can find more discussions in this blog [post](https://lmsys.org/blog/2023-05-03-arena/).
"""
""",
elem_id="leaderboard_markdown",
)

leader_component_values[:] = [md, p1, p2, p3, p4]
Expand Down