Launch:
python3 launch_openai_api_test_server.py
"""
import warnings
from distutils.version import LooseVersion

import openai

try:
    from openai import OpenAI, AsyncOpenAI
except ImportError:
    warnings.warn("openai<1.0 is deprecated")

from fastchat.utils import run_cmd
1118
# Point the (pre-1.0 style) openai module globals at the locally launched
# FastChat server; the per-function OpenAI(...) clients reuse these values.
openai.api_key = "EMPTY"  # Not support yet
openai.api_base = "http://localhost:8000/v1"
1522
def test_list_models():
    """Return the list of model ids served by the API.

    Supports both SDK generations: module-level ``openai.Model.list()`` for
    openai<1.0 and a ``OpenAI`` client for openai>=1.0.
    """
    if LooseVersion(openai.__version__) < LooseVersion("1.0"):
        model_list = openai.Model.list()
    else:
        client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
        model_list = client.models.list()
    # Attribute access works on both generations' response objects.
    names = [x.id for x in model_list.data]
    return names
2031
2132
2233def test_completion (model , logprob ):
2334 prompt = "Once upon a time"
24- completion = openai .Completion .create (
25- model = model ,
26- prompt = prompt ,
27- logprobs = logprob ,
28- max_tokens = 64 ,
29- temperature = 0 ,
30- )
35+ if LooseVersion (openai .__version__ ) < LooseVersion ("1.0" ):
36+ completion = openai .Completion .create (
37+ model = model ,
38+ prompt = prompt ,
39+ logprobs = logprob ,
40+ max_tokens = 64 ,
41+ temperature = 0 ,
42+ )
43+ else :
44+ client = OpenAI (api_key = openai .api_key , base_url = openai .api_base )
45+ # legacy
46+ completion = client .completions .create (
47+ model = model ,
48+ prompt = prompt ,
49+ logprobs = logprob ,
50+ max_tokens = 64 ,
51+ temperature = 0 ,
52+ )
53+
3154 print (f"full text: { prompt + completion .choices [0 ].text } " , flush = True )
3255 if completion .choices [0 ].logprobs is not None :
3356 print (
@@ -38,42 +61,76 @@ def test_completion(model, logprob):
3861
def test_completion_stream(model):
    """Stream a text completion for *model* and print tokens as they arrive."""
    prompt = "Once upon a time"
    if LooseVersion(openai.__version__) < LooseVersion("1.0"):
        res = openai.Completion.create(
            model=model,
            prompt=prompt,
            max_tokens=64,
            stream=True,
            temperature=0,
        )
    else:
        client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
        # Completions is a legacy endpoint in openai>=1.0.
        res = client.completions.create(
            model=model,
            prompt=prompt,
            max_tokens=64,
            stream=True,
            temperature=0,
        )
    print(prompt, end="")
    for chunk in res:
        # Attribute access works on both generations' stream chunks.
        content = chunk.choices[0].text
        print(content, end="", flush=True)
    print()
5387
5488
def test_embedding(model):
    """Request an embedding for a fixed string and print its length and head."""
    if LooseVersion(openai.__version__) < LooseVersion("1.0"):
        embedding = openai.Embedding.create(model=model, input="Hello world!")
    else:
        client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
        embedding = client.embeddings.create(model=model, input="Hello world!")
    # Attribute access works on both generations' response objects.
    print(f"embedding len: {len(embedding.data[0].embedding)}")
    print(f"embedding value[:5]: {embedding.data[0].embedding[:5]}")
5997
6098
def test_chat_completion(model):
    """Run a single non-streaming chat completion and print the reply text."""
    if LooseVersion(openai.__version__) < LooseVersion("1.0"):
        completion = openai.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": "Hello! What is your name?"}],
            temperature=0,
        )
    else:
        client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
        completion = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "Hello! What is your name?"}],
            temperature=0,
        )
    print(completion.choices[0].message.content)
68114
69115
def test_chat_completion_stream(model):
    """Stream a chat completion and print delta tokens as they arrive."""
    messages = [{"role": "user", "content": "Hello! What is your name?"}]
    if LooseVersion(openai.__version__) < LooseVersion("1.0"):
        res = openai.ChatCompletion.create(
            model=model, messages=messages, stream=True, temperature=0
        )
    else:
        client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
        res = client.chat.completions.create(
            model=model, messages=messages, stream=True, temperature=0
        )
    for chunk in res:
        try:
            content = chunk.choices[0].delta.content
        except AttributeError:
            # openai<1.0 deltas are dict-like and may omit "content" entirely
            # (e.g. the role-only first chunk).
            content = chunk.choices[0].delta.get("content", "")
        if content is None:
            content = ""
        print(content, end="", flush=True)
    print()
79136