Skip to content

Commit dbae7b2

Browse files
authored
fix: update documentation with latest models (#2068)
1 parent deb3730 commit dbae7b2

File tree

169 files changed

+324
-324
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

169 files changed

+324
-324
lines changed

notebook/LlamaIndex_query_engine.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@
108108
"\n",
109109
"chroma_query_engine = LlamaIndexQueryEngine(\n",
110110
" vector_store=chroma_vector_store,\n",
111-
" llm=OpenAI(model=\"gpt-4o\", temperature=0.0), # Default model for querying, change if needed\n",
111+
" llm=OpenAI(model=\"gpt-4.1\", temperature=0.0), # Default model for querying, change if needed\n",
112112
")"
113113
]
114114
},
@@ -262,7 +262,7 @@
262262
"source": [
263263
"pinecone_query_engine = LlamaIndexQueryEngine(\n",
264264
" vector_store=pinecone_vector_store,\n",
265-
" llm=OpenAI(model=\"gpt-4o\", temperature=0.0), # Default model for querying, change if needed\n",
265+
" llm=OpenAI(model=\"gpt-4.1\", temperature=0.0), # Default model for querying, change if needed\n",
266266
")"
267267
]
268268
},

notebook/agent_tools_run_examples.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
"from autogen.tools import tool\n",
2929
"\n",
3030
"# Load LLM configuration\n",
31-
"llm_config = autogen.LLMConfig.from_json(path=\"OAI_CONFIG_LIST\", cache_seed=42).where(model=[\"gpt-4o-mini\"])"
31+
"llm_config = autogen.LLMConfig.from_json(path=\"OAI_CONFIG_LIST\", cache_seed=42).where(model=[\"gpt-5-nano\"])"
3232
]
3333
},
3434
{

notebook/agentchat_RetrieveChat_couchbase.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@
8787
"# a vector database instance\n",
8888
"from autogen.retrieve_utils import TEXT_FORMATS\n",
8989
"\n",
90-
"config_list = [{\"model\": \"gpt-4o-mini\", \"api_key\": os.environ[\"OPENAI_API_KEY\"], \"api_type\": \"openai\"}]\n",
90+
"config_list = [{\"model\": \"gpt-5-nano\", \"api_key\": os.environ[\"OPENAI_API_KEY\"], \"api_type\": \"openai\"}]\n",
9191
"assert len(config_list) > 0\n",
9292
"print(\"models to use: \", [config_list[i][\"model\"] for i in range(len(config_list))])"
9393
]

notebook/agentchat_assistant_agent_standalone.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@
7575
"metadata": {},
7676
"outputs": [],
7777
"source": [
78-
"llm_config = autogen.LLMConfig.from_json(path=\"OAI_CONFIG_LIST\", temperature=0.8, timeout=600).where(tags=\"gpt-4o-mini\")"
78+
"llm_config = autogen.LLMConfig.from_json(path=\"OAI_CONFIG_LIST\", temperature=0.8, timeout=600).where(tags=\"gpt-5-nano\")"
7979
]
8080
},
8181
{

notebook/agentchat_captainagent.ipynb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@
4141
"```python\n",
4242
"config_list = [\n",
4343
" {\n",
44-
" 'model': 'gpt-4o-mini',\n",
44+
" 'model': 'gpt-5-nano',\n",
4545
" 'api_key': '<your OpenAI API key here>',\n",
4646
" },\n",
4747
" {\n",
@@ -78,15 +78,15 @@
7878
"\n",
7979
"config_path = \"OAI_CONFIG_LIST\"\n",
8080
"# You can modify the filter_dict to select your model\n",
81-
"llm_config = autogen.LLMConfig.from_json(path=config_path).where(model=\"gpt-4o\")"
81+
"llm_config = autogen.LLMConfig.from_json(path=config_path).where(model=\"gpt-5\")"
8282
]
8383
},
8484
{
8585
"cell_type": "markdown",
8686
"metadata": {},
8787
"source": [
8888
"## Using CaptainAgent without libraries\n",
89-
"We begin with demonstrating how to use CaptainAgent without retrieving from libraries. In this case, CaptainAgent will automatically generate a set of experts according to its identified subtask and initiate the group chat. By default, the backbone of the LLM is set to `gpt-4o`. For instructions on configuring the backbone, refer to docs on [`nested_mode`](https://docs.ag2.ai/latest/docs/user-guide/reference-agents/captainagent)."
89+
"We begin by demonstrating how to use CaptainAgent without retrieving from libraries. In this case, CaptainAgent will automatically generate a set of experts according to its identified subtask and initiate the group chat. By default, the backbone of the LLM is set to `gpt-5`. For instructions on configuring the backbone, refer to docs on [`nested_mode`](https://docs.ag2.ai/latest/docs/user-guide/reference-agents/captainagent)."
9090
]
9191
},
9292
{

notebook/agentchat_captainagent_crosstool.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@
7272
"import autogen\n",
7373
"\n",
7474
"config_path = \"OAI_CONFIG_LIST\"\n",
75-
"llm_config = autogen.LLMConfig.from_json(path=config_path, temperature=0).where(model=\"gpt-4o\")"
75+
"llm_config = autogen.LLMConfig.from_json(path=config_path, temperature=0).where(model=\"gpt-5\")"
7676
]
7777
},
7878
{

notebook/agentchat_cost_token_tracking.ipynb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@
6666
"from autogen import AssistantAgent, LLMConfig, OpenAIWrapper, UserProxyAgent, gather_usage_summary\n",
6767
"\n",
6868
"filter_dict = {\n",
69-
" \"tags\": [\"gpt-4o\", \"gpt-4o-mini\"], # comment out to get all\n",
69+
" \"tags\": [\"gpt-5\", \"gpt-5-nano\"], # comment out to get all\n",
7070
"}\n",
7171
"\n",
7272
"config_list = autogen.config_list_from_json(\n",
@@ -85,17 +85,17 @@
8585
"```python\n",
8686
"config_list = [\n",
8787
" {\n",
88-
" \"model\": \"gpt-4o\",\n",
88+
" \"model\": \"gpt-5\",\n",
8989
" \"api_key\": \"<your OpenAI API key>\",\n",
90-
" \"tags\": [\"gpt-4o\"],\n",
91-
" }, # OpenAI API endpoint for gpt-4o\n",
90+
" \"tags\": [\"gpt-5\"],\n",
91+
" }, # OpenAI API endpoint for gpt-5\n",
9292
" {\n",
93-
" \"model\": \"gpt-4o-mini\",\n",
93+
" \"model\": \"gpt-5-nano\",\n",
9494
" \"base_url\": \"<your Azure OpenAI API base>\", \n",
9595
" \"api_type\": \"azure\", \n",
9696
" \"api_version\": \"2024-07-18\",\n",
9797
" \"api_key\": \"<your Azure OpenAI API key>\",\n",
98-
" \"tags\": [\"gpt-4o-mini\", \"20240718\"],\n",
98+
" \"tags\": [\"gpt-5-nano\", \"20240718\"],\n",
9999
" }\n",
100100
"]\n",
101101
"```\n",

notebook/agentchat_function_call_currency_converter_tool_api.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@
107107
"llm_config = autogen.LLMConfig(\n",
108108
" config_list=[\n",
109109
" {\n",
110-
" \"model\": \"gpt-4o\",\n",
110+
" \"model\": \"gpt-4.1\",\n",
111111
" \"api_type\": \"azure\",\n",
112112
" \"api_key\": os.getenv(\"AZURE_OPENAI_API_KEY\"),\n",
113113
" \"base_url\": os.getenv(\"BASE_URL\"),\n",

notebook/agentchat_graph_rag_neo4j.ipynb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@
8686
"\n",
8787
"> **Important** \n",
8888
"> - **Default Models**:\n",
89-
"> - **Question Answering**: OpenAI's `GPT-4o` with `temperature=0.0`.\n",
89+
"> - **Question Answering**: OpenAI's `gpt-4.1` with `temperature=0.0`.\n",
9090
"> - **Embedding**: OpenAI's `text-embedding-3-small`.\n",
9191
"> \n",
9292
"> - **Customization**:\n",
@@ -167,7 +167,7 @@
167167
" password=\"password\", # Change if you reset password\n",
168168
" host=\"bolt://172.17.0.3\", # Change\n",
169169
" port=7687, # if needed\n",
170-
" llm=OpenAI(model=\"gpt-4o\", temperature=0.0), # Default, no need to specify\n",
170+
" llm=OpenAI(model=\"gpt-4.1\", temperature=0.0), # Default, no need to specify\n",
171171
" embedding=OpenAIEmbedding(model_name=\"text-embedding-3-small\"), # unless you want to use a different model\n",
172172
" database=\"neo4j\", # Change if you want to store the graph in your custom database\n",
173173
")\n",
@@ -199,7 +199,7 @@
199199
" password=\"password\", # Change if you reset password\n",
200200
" host=\"bolt://172.17.0.3\", # Change\n",
201201
" port=7687, # if needed\n",
202-
" llm=OpenAI(model=\"gpt-4o\", temperature=0.0), # Default, no need to specify\n",
202+
" llm=OpenAI(model=\"gpt-4.1\", temperature=0.0), # Default, no need to specify\n",
203203
" embedding=OpenAIEmbedding(model_name=\"text-embedding-3-small\"), # unless you want to use a different model\n",
204204
" database=\"neo4j\", # Change if you want to store the graph in your custom database\n",
205205
")\n",
@@ -320,7 +320,7 @@
320320
" host=\"bolt://192.168.0.115\",\n",
321321
" port=7687,\n",
322322
" database=\"neo4j\",\n",
323-
" llm=OpenAI(model=\"gpt-4o\", temperature=0.0),\n",
323+
" llm=OpenAI(model=\"gpt-4.1\", temperature=0.0),\n",
324324
" embedding=OpenAIEmbedding(model_name=\"text-embedding-3-small\"),\n",
325325
" entities=entities, # possible entities\n",
326326
" relations=relations, # possible relations\n",

notebook/agentchat_graph_rag_neo4j_native.ipynb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -78,8 +78,8 @@
7878
"\n",
7979
"**Important** \n",
8080
"- **Default Models**:\n",
81-
" - **Knowledge Graph Construction** OpenAI's `GPT-4o` with `json_object` output `temperature=0.0`.\n",
82-
" - **Question Answering**: OpenAI's `GPT-4o` with `temperature=0.0`.\n",
81+
" - **Knowledge Graph Construction**: OpenAI's `gpt-5` with `json_object` output and `temperature=0.0`.\n",
82+
" - **Question Answering**: OpenAI's `gpt-5` with `temperature=0.0`.\n",
8383
" - **Embedding**: OpenAI's `text-embedding-3-large`. You need to provide its dimension for the query engine later.\n",
8484
"\n",
8585
"- **Customization**:\n",
@@ -102,15 +102,15 @@
102102
"from neo4j_graphrag.llm.openai_llm import OpenAILLM\n",
103103
"\n",
104104
"llm = OpenAILLM(\n",
105-
" model_name=\"gpt-4o\",\n",
105+
" model_name=\"gpt-5\",\n",
106106
" model_params={\n",
107107
" \"response_format\": {\"type\": \"json_object\"}, # JSON format response is required for the LLM\n",
108108
" \"temperature\": 0,\n",
109109
" },\n",
110110
")\n",
111111
"\n",
112112
"query_llm = OpenAILLM(\n",
113-
" model_name=\"gpt-4o\",\n",
113+
" model_name=\"gpt-5\",\n",
114114
" model_params={\"temperature\": 0}, # Don't use json format response for the query LLM\n",
115115
")\n",
116116
"\n",

0 commit comments

Comments
 (0)