Skip to content

Commit 093574d

Browse files
committed
fix openai api server docs
1 parent 82ef3a3 commit 093574d

File tree

2 files changed

+38
-95
lines changed

2 files changed

+38
-95
lines changed

docs/openai_api.md

Lines changed: 11 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -32,37 +32,30 @@ Now, let us test the API server.
3232
### OpenAI Official SDK
3333
The goal of `openai_api_server.py` is to implement a fully OpenAI-compatible API server, so the models can be used directly with [openai-python](https://github.com/openai/openai-python) library.
3434

35-
First, install openai-python:
35+
First, install OpenAI python package >= 1.0:
3636
```bash
3737
pip install --upgrade openai
3838
```
3939

40-
Then, interact with model vicuna:
40+
Then, interact with the Vicuna model:
4141
```python
42-
from openai import OpenAI
43-
# to get proper authentication, make sure to use a valid key that's listed in
44-
# the --api-keys flag. if no flag value is provided, the `api_key` will be ignored.
45-
client = OpenAI(api_key="EMPTY", base_url="http://localhost:8000/v1", default_headers={"x-foo": "true"})
42+
import openai
43+
44+
openai.api_key = "EMPTY"
45+
openai.base_url = "http://localhost:8000/v1/"
4646

4747
model = "vicuna-7b-v1.5"
4848
prompt = "Once upon a time"
4949

50-
# create a completion (legacy)
51-
completion = client.completions.create(
52-
model=model,
53-
prompt=prompt
54-
)
50+
# create a completion
51+
completion = openai.completions.create(model=model, prompt=prompt, max_tokens=64)
5552
# print the completion
5653
print(prompt + completion.choices[0].text)
5754

5855
# create a chat completion
59-
completion = client.chat.completions.create(
60-
model="vicuna-7b-v1.5",
61-
response_format={ "type": "json_object" },
62-
messages=[
63-
{"role": "system", "content": "You are a helpful assistant designed to output JSON."},
64-
{"role": "user", "content": "Who won the world series in 2020?"}
65-
]
56+
completion = openai.chat.completions.create(
57+
model=model,
58+
messages=[{"role": "user", "content": "Hello! What is your name?"}]
6659
)
6760
# print the completion
6861
print(completion.choices[0].message.content)

tests/test_openai_api.py

Lines changed: 27 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -4,52 +4,31 @@
44
Launch:
55
python3 launch_openai_api_test_server.py
66
"""
7-
from distutils.version import LooseVersion
87
import warnings
98

109
import openai
11-
12-
try:
13-
from openai import OpenAI, AsyncOpenAI
14-
except ImportError:
15-
warnings.warn("openai<1.0 is deprecated")
16-
1710
from fastchat.utils import run_cmd
1811

12+
1913
openai.api_key = "EMPTY"  # Not supported yet
20-
openai.api_base = "http://localhost:8000/v1"
14+
openai.base_url = "http://localhost:8000/v1/"
2115

2216

2317
def test_list_models():
24-
if LooseVersion(openai.__version__) < LooseVersion("1.0"):
25-
model_list = openai.Model.list()
26-
else:
27-
client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
28-
model_list = client.models.list()
18+
model_list = openai.models.list()
2919
names = [x.id for x in model_list.data]
3020
return names
3121

3222

3323
def test_completion(model, logprob):
3424
prompt = "Once upon a time"
35-
if LooseVersion(openai.__version__) < LooseVersion("1.0"):
36-
completion = openai.Completion.create(
37-
model=model,
38-
prompt=prompt,
39-
logprobs=logprob,
40-
max_tokens=64,
41-
temperature=0,
42-
)
43-
else:
44-
client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
45-
# legacy
46-
completion = client.completions.create(
47-
model=model,
48-
prompt=prompt,
49-
logprobs=logprob,
50-
max_tokens=64,
51-
temperature=0,
52-
)
25+
completion = openai.completions.create(
26+
model=model,
27+
prompt=prompt,
28+
logprobs=logprob,
29+
max_tokens=64,
30+
temperature=0,
31+
)
5332

5433
print(f"full text: {prompt + completion.choices[0].text}", flush=True)
5534
if completion.choices[0].logprobs is not None:
@@ -61,24 +40,13 @@ def test_completion(model, logprob):
6140

6241
def test_completion_stream(model):
6342
prompt = "Once upon a time"
64-
if LooseVersion(openai.__version__) < LooseVersion("1.0"):
65-
res = openai.Completion.create(
66-
model=model,
67-
prompt=prompt,
68-
max_tokens=64,
69-
stream=True,
70-
temperature=0,
71-
)
72-
else:
73-
client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
74-
# legacy
75-
res = client.completions.create(
76-
model=model,
77-
prompt=prompt,
78-
max_tokens=64,
79-
stream=True,
80-
temperature=0,
81-
)
43+
res = openai.completions.create(
44+
model=model,
45+
prompt=prompt,
46+
max_tokens=64,
47+
stream=True,
48+
temperature=0,
49+
)
8250
print(prompt, end="")
8351
for chunk in res:
8452
content = chunk.choices[0].text
@@ -87,43 +55,25 @@ def test_completion_stream(model):
8755

8856

8957
def test_embedding(model):
90-
if LooseVersion(openai.__version__) < LooseVersion("1.0"):
91-
embedding = openai.Embedding.create(model=model, input="Hello world!")
92-
else:
93-
client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
94-
embedding = client.embeddings.create(model=model, input="Hello world!")
58+
embedding = openai.embeddings.create(model=model, input="Hello world!")
9559
print(f"embedding len: {len(embedding.data[0].embedding)}")
9660
print(f"embedding value[:5]: {embedding.data[0].embedding[:5]}")
9761

9862

9963
def test_chat_completion(model):
100-
if LooseVersion(openai.__version__) < LooseVersion("1.0"):
101-
completion = openai.ChatCompletion.create(
102-
model=model,
103-
messages=[{"role": "user", "content": "Hello! What is your name?"}],
104-
temperature=0,
105-
)
106-
else:
107-
client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
108-
completion = client.chat.completions.create(
109-
model=model,
110-
messages=[{"role": "user", "content": "Hello! What is your name?"}],
111-
temperature=0,
112-
)
64+
completion = openai.chat.completions.create(
65+
model=model,
66+
messages=[{"role": "user", "content": "Hello! What is your name?"}],
67+
temperature=0,
68+
)
11369
print(completion.choices[0].message.content)
11470

11571

11672
def test_chat_completion_stream(model):
11773
messages = [{"role": "user", "content": "Hello! What is your name?"}]
118-
if LooseVersion(openai.__version__) < LooseVersion("1.0"):
119-
res = openai.ChatCompletion.create(
120-
model=model, messages=messages, stream=True, temperature=0
121-
)
122-
else:
123-
client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
124-
res = client.chat.completions.create(
125-
model=model, messages=messages, stream=True, temperature=0
126-
)
74+
res = openai.chat.completions.create(
75+
model=model, messages=messages, stream=True, temperature=0
76+
)
12777
for chunk in res:
12878
try:
12979
content = chunk.choices[0].delta.content
@@ -192,7 +142,7 @@ def test_openai_curl():
192142
test_chat_completion_stream(model)
193143
try:
194144
test_embedding(model)
195-
except openai.error.APIError as e:
145+
except openai.APIError as e:
196146
print(f"Embedding error: {e}")
197147

198148
print("===== Test curl =====")

0 commit comments

Comments
 (0)