Skip to content

Commit 9fb4479

Browse files
authored
Merge branch 'main' into set-partition-key
2 parents d99e7f3 + dce6623 commit 9fb4479

File tree

78 files changed

+7178
-1869
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

78 files changed

+7178
-1869
lines changed

dapr_agents/agents/agent/agent.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,11 @@ async def process_iterations(self, messages: List[Dict[str, Any]]) -> Any:
201201
response: LLMChatResponse = self.llm.generate(
202202
messages=messages,
203203
tools=self.get_llm_tools(),
204-
tool_choice=self.tool_choice,
204+
**(
205+
{"tool_choice": self.tool_choice}
206+
if self.tool_choice is not None
207+
else {}
208+
),
205209
)
206210
# Get the first candidate from the response
207211
response_message = response.get_message()

dapr_agents/agents/base.py

Lines changed: 38 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@
2727
)
2828
from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict
2929
from dapr_agents.llm.chat import ChatClientBase
30-
from dapr_agents.llm.openai import OpenAIChatClient
3130

3231
logger = logging.getLogger(__name__)
3332

@@ -66,8 +65,8 @@ class AgentBase(BaseModel, ABC):
6665
default=None,
6766
description="A custom system prompt, overriding name, role, goal, and instructions.",
6867
)
69-
llm: ChatClientBase = Field(
70-
default_factory=OpenAIChatClient,
68+
llm: Optional[ChatClientBase] = Field(
69+
default=None,
7170
description="Language model client for generating responses.",
7271
)
7372
prompt_template: Optional[PromptTemplateBase] = Field(
@@ -136,12 +135,16 @@ def set_name_from_role(cls, values: dict):
136135
@model_validator(mode="after")
137136
def validate_llm(cls, values):
138137
"""Validate that LLM is properly configured."""
139-
if hasattr(values, "llm") and values.llm:
140-
try:
141-
# Validate LLM is properly configured by accessing it as this is required to be set.
142-
_ = values.llm
143-
except Exception as e:
144-
raise ValueError(f"Failed to initialize LLM: {e}") from e
138+
if hasattr(values, "llm"):
139+
if values.llm is None:
140+
logger.warning("LLM client is None, some functionality may be limited.")
141+
else:
142+
try:
143+
# Validate LLM is properly configured by accessing it as this is required to be set.
144+
_ = values.llm
145+
except Exception as e:
146+
logger.error(f"Failed to initialize LLM: {e}")
147+
values.llm = None
145148

146149
return values
147150

@@ -160,10 +163,15 @@ def model_post_init(self, __context: Any) -> None:
160163
if self.tool_choice is None:
161164
self.tool_choice = "auto" if self.tools else None
162165

166+
# Initialize LLM if not provided
167+
if self.llm is None:
168+
self.llm = self._create_default_llm()
169+
163170
# Centralize prompt template selection logic
164171
self.prompt_template = self._initialize_prompt_template()
165172
# Ensure LLM client and agent both reference the same template
166-
self.llm.prompt_template = self.prompt_template
173+
if self.llm is not None:
174+
self.llm.prompt_template = self.prompt_template
167175

168176
self._validate_prompt_template()
169177
self.prefill_agent_attributes()
@@ -174,6 +182,21 @@ def model_post_init(self, __context: Any) -> None:
174182

175183
super().model_post_init(__context)
176184

185+
def _create_default_llm(self) -> Optional[ChatClientBase]:
186+
"""
187+
Creates a default LLM client when none is provided.
188+
Returns None if the default LLM cannot be created due to missing configuration.
189+
"""
190+
try:
191+
from dapr_agents.llm.openai import OpenAIChatClient
192+
193+
return OpenAIChatClient()
194+
except Exception as e:
195+
logger.warning(
196+
f"Failed to create default OpenAI client: {e}. LLM will be None."
197+
)
198+
return None
199+
177200
def _initialize_prompt_template(self) -> PromptTemplateBase:
178201
"""
179202
Determines which prompt template to use for the agent:
@@ -190,7 +213,11 @@ def _initialize_prompt_template(self) -> PromptTemplateBase:
190213
return self.prompt_template
191214

192215
# 2) LLM client has one?
193-
if self.llm.prompt_template:
216+
if (
217+
self.llm
218+
and hasattr(self.llm, "prompt_template")
219+
and self.llm.prompt_template
220+
):
194221
logger.debug("🔄 Syncing from llm.prompt_template")
195222
return self.llm.prompt_template
196223

dapr_agents/agents/durableagent/agent.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -379,7 +379,11 @@ async def generate_response(
379379
response: LLMChatResponse = self.llm.generate(
380380
messages=messages,
381381
tools=self.get_llm_tools(),
382-
tool_choice=self.tool_choice,
382+
**(
383+
{"tool_choice": self.tool_choice}
384+
if self.tool_choice is not None
385+
else {}
386+
),
383387
)
384388
# Get the first candidate from the response
385389
response_message = response.get_message()
@@ -418,7 +422,7 @@ async def run_tool(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
418422
raise AgentError(f"Invalid JSON in tool args: {e}")
419423

420424
# Run the tool
421-
logger.info(f"Executing tool '{fn_name}' with args: {args}")
425+
logger.debug(f"Executing tool '{fn_name}' with args: {args}")
422426
try:
423427
result = await self.tool_executor.run_tool(fn_name, **args)
424428
except Exception as e:

dapr_agents/llm/huggingface/chat.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -162,10 +162,13 @@ def generate(
162162
else:
163163
params.update(kwargs)
164164

165-
# 4) Override model if given
165+
# 4) Add the stream parameter explicitly to params
166+
params["stream"] = stream
167+
168+
# 5) Override model if given
166169
params["model"] = model or self.model
167170

168-
# 5) Inject tools / response_format via RequestHandler
171+
# 6) Inject tools / response_format via RequestHandler
169172
params = RequestHandler.process_params(
170173
params,
171174
llm_provider=self.provider,
@@ -174,11 +177,11 @@ def generate(
174177
structured_mode=structured_mode,
175178
)
176179

177-
# 6) Call HF API + delegate parsing to ResponseHandler
180+
# 7) Call HF API + delegate parsing to ResponseHandler
178181
try:
179182
logger.info("Calling HF ChatCompletion Inference API...")
180183
logger.debug(f"HF params: {params}")
181-
response = self.client.chat.completions.create(**params, stream=stream)
184+
response = self.client.chat.completions.create(**params)
182185
logger.info("HF ChatCompletion response received.")
183186

184187
# HF-specific error‐code handling
@@ -198,4 +201,4 @@ def generate(
198201

199202
except Exception as e:
200203
logger.error("Hugging Face ChatCompletion API error", exc_info=True)
201-
raise ValueError("Failed to process HF chat completion") from e
204+
raise ValueError(f"Failed to process HF chat completion: {e}") from e

dapr_agents/llm/nvidia/chat.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -168,11 +168,14 @@ def generate(
168168
else:
169169
params.update(kwargs)
170170

171-
# 4) Override model & max_tokens if provided
171+
# 4) Add the stream parameter explicitly to params
172+
params["stream"] = stream
173+
174+
# 5) Override model & max_tokens if provided
172175
params["model"] = model or self.model
173176
params["max_tokens"] = max_tokens or self.max_tokens
174177

175-
# 5) Inject tools / response_format / structured_mode
178+
# 6) Inject tools / response_format / structured_mode
176179
params = RequestHandler.process_params(
177180
params,
178181
llm_provider=self.provider,
@@ -181,11 +184,11 @@ def generate(
181184
structured_mode=structured_mode,
182185
)
183186

184-
# 6) Call NVIDIA API + dispatch to ResponseHandler
187+
# 7) Call NVIDIA API + dispatch to ResponseHandler
185188
try:
186189
logger.info("Calling NVIDIA ChatCompletion API.")
187190
logger.debug(f"Parameters: {params}")
188-
resp = self.client.chat.completions.create(**params, stream=stream)
191+
resp = self.client.chat.completions.create(**params)
189192
return ResponseHandler.process_response(
190193
response=resp,
191194
llm_provider=self.provider,

dapr_agents/llm/openai/chat.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -194,10 +194,13 @@ def generate(
194194
else:
195195
params.update(kwargs)
196196

197-
# 4) Override model if given
197+
# 4) Add the stream parameter explicitly to params
198+
params["stream"] = stream
199+
200+
# 5) Override model if given
198201
params["model"] = model or self.model
199202

200-
# 5) Let RequestHandler inject tools / response_format / structured_mode
203+
# 6) Let RequestHandler inject tools / response_format / structured_mode
201204
params = RequestHandler.process_params(
202205
params,
203206
llm_provider=self.provider,
@@ -206,13 +209,11 @@ def generate(
206209
structured_mode=structured_mode,
207210
)
208211

209-
# 6) Call API + hand off to ResponseHandler
212+
# 7) Call API + hand off to ResponseHandler
210213
try:
211214
logger.info("Calling OpenAI ChatCompletion...")
212215
logger.debug(f"ChatCompletion params: {params}")
213-
resp = self.client.chat.completions.create(
214-
**params, stream=stream, timeout=self.timeout
215-
)
216+
resp = self.client.chat.completions.create(**params, timeout=self.timeout)
216217
logger.info("ChatCompletion response received.")
217218
return ResponseHandler.process_response(
218219
response=resp,

dapr_agents/llm/openai/client/openai.py

Lines changed: 19 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ class OpenAIClient:
1717

1818
def __init__(
1919
self,
20-
api_key: str,
20+
api_key: Optional[str] = None,
2121
base_url: Optional[str] = None,
2222
organization: Optional[str] = None,
2323
project: Optional[str] = None,
@@ -27,16 +27,16 @@ def __init__(
2727
Initializes the OpenAI client with API key, base URL, and organization.
2828
2929
Args:
30-
api_key: The OpenAI API key.
30+
api_key: The OpenAI API key (will fall back to OPENAI_API_KEY env var if not provided).
3131
base_url: The base URL for OpenAI API (defaults to https://api.openai.com/v1).
3232
organization: The OpenAI organization (optional).
3333
project: The OpenAI Project name (optional).
3434
timeout: Timeout for requests (default is 1500 seconds).
3535
"""
36-
self.api_key = api_key # or inferred from OPENAI_API_KEY env variable.
37-
self.base_url = base_url # or set to "https://api.openai.com/v1" by default.
38-
self.organization = organization # or inferred from OPENAI_ORG_ID env variable.
39-
self.project = project # or inferred from OPENAI_PROJECT_ID env variable.
36+
self.api_key = api_key # Will be None if not provided - OpenAI SDK will handle env var fallback
37+
self.base_url = base_url
38+
self.organization = organization
39+
self.project = project
4040
self.timeout = HTTPHelper.configure_timeout(timeout)
4141

4242
def get_client(self) -> OpenAI:
@@ -46,13 +46,19 @@ def get_client(self) -> OpenAI:
4646
Returns:
4747
OpenAI: The initialized OpenAI client.
4848
"""
49-
return OpenAI(
50-
api_key=self.api_key,
51-
base_url=self.base_url,
52-
organization=self.organization,
53-
project=self.project,
54-
timeout=self.timeout,
55-
)
49+
# Build kwargs, only including non-None values so SDK can fall back to env vars
50+
kwargs = {}
51+
if self.api_key is not None:
52+
kwargs["api_key"] = self.api_key
53+
if self.base_url is not None:
54+
kwargs["base_url"] = self.base_url
55+
if self.organization is not None:
56+
kwargs["organization"] = self.organization
57+
if self.project is not None:
58+
kwargs["project"] = self.project
59+
kwargs["timeout"] = self.timeout
60+
61+
return OpenAI(**kwargs)
5662

5763
@classmethod
5864
def from_config(

dapr_agents/observability/__init__.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
from .instrumentor import DaprAgentsInstrumentor
2+
3+
4+
__all__ = [
5+
"DaprAgentsInstrumentor",
6+
]

0 commit comments

Comments (0)