Commit 1cd56df

Fixing method names for async
1 parent 51f1ae2 commit 1cd56df

5 files changed: +155 -21 lines changed


python/instrumentation/openinference-instrumentation-haystack/examples/retriever_component_run.py

Lines changed: 13 additions & 2 deletions
@@ -1,3 +1,5 @@
+import asyncio
+
 from haystack import Document
 from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
 from haystack.document_stores.in_memory import InMemoryDocumentStore
@@ -29,5 +31,14 @@
 ]
 document_store.write_documents(documents=documents)

-retriever = InMemoryBM25Retriever(document_store=document_store)
-print(retriever.run(query="How many languages are spoken around the world today?"))
+
+
+async def run():
+    retriever = InMemoryBM25Retriever(document_store=document_store)
+    result = await retriever.run_async(
+        query="How many languages are spoken around the world today?"
+    )
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(run())
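The updated example only prints the retriever output. For context, here is a minimal sketch (not part of the commit) of how the renamed spans surface once the instrumentation is enabled; the in-memory exporter setup is an assumption for illustration — it mirrors what the package's tests use — and any OpenTelemetry tracer provider behaves the same way.

    import asyncio

    from haystack import Document
    from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
    from haystack.document_stores.in_memory import InMemoryDocumentStore
    from openinference.instrumentation.haystack import HaystackInstrumentor
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

    # Illustrative in-memory tracing setup so the span names can be inspected directly.
    exporter = InMemorySpanExporter()
    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(exporter))
    HaystackInstrumentor().instrument(tracer_provider=tracer_provider)


    async def main() -> None:
        document_store = InMemoryDocumentStore()
        document_store.write_documents(
            [Document(content="There are over 7,000 languages spoken around the world today.")]
        )
        retriever = InMemoryBM25Retriever(document_store=document_store)
        await retriever.run_async(query="How many languages are spoken around the world today?")
        # With this commit, the exported span is named "InMemoryBM25Retriever.run_async"
        # rather than the previous blanket "InMemoryBM25Retriever.run".
        print([span.name for span in exporter.get_finished_spans()])


    if __name__ == "__main__":
        asyncio.run(main())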

python/instrumentation/openinference-instrumentation-haystack/src/openinference/instrumentation/haystack/_wrappers.py

Lines changed: 4 additions & 4 deletions
@@ -134,7 +134,7 @@ def __call__(
         bound_arguments = _get_bound_arguments(wrapped, *args, **kwargs)

         with self._tracer.start_as_current_span(
-            name=_get_component_span_name(component_class_name)
+            name=_get_component_span_name(component_class_name, wrapped)
         ) as span:
             component_type = _get_component_type(component)
             span.set_attributes(_set_component_runner_request_attributes(bound_arguments, instance))
@@ -170,7 +170,7 @@ async def __call__(
         bound_arguments = _get_bound_arguments(wrapped, *args, **kwargs)
         component_type = _get_component_type(instance)
         with self._tracer.start_as_current_span(
-            name=_get_component_span_name(component_class_name),
+            name=_get_component_span_name(component_class_name, wrapped),
             attributes=_set_component_runner_request_attributes(bound_arguments, instance),
         ) as span:
             result = await wrapped(*args, **kwargs)
@@ -325,11 +325,11 @@ def _get_component_class_name(component: "Component") -> str:
     return str(component.__class__.__name__)


-def _get_component_span_name(component_class_name: str) -> str:
+def _get_component_span_name(component_class_name: str, wrapped: Callable[..., Any]) -> str:
     """
     Gets the name of the span for a component.
     """
-    return f"{component_class_name}.run"
+    return f"{component_class_name}.{wrapped.__name__}"


 def _get_component_type(component: "Component") -> ComponentType:
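For reference, a small standalone sketch of why `wrapped` is now passed through: the span name is derived from `wrapped.__name__`, so an async invocation is reported as `<Component>.run_async` instead of being collapsed into `<Component>.run`. Aside from the helper shown in the hunk above, nothing here is library code; `FakeGenerator` is a hypothetical stand-in used only for illustration.

    from typing import Any, Callable


    def _get_component_span_name(component_class_name: str, wrapped: Callable[..., Any]) -> str:
        # Same logic as the updated helper above: derive the suffix from the wrapped callable.
        return f"{component_class_name}.{wrapped.__name__}"


    class FakeGenerator:  # illustrative stand-in for a Haystack component
        def run(self) -> dict:
            return {}

        async def run_async(self) -> dict:
            return {}


    print(_get_component_span_name("FakeGenerator", FakeGenerator.run))        # FakeGenerator.run
    print(_get_component_span_name("FakeGenerator", FakeGenerator.run_async))  # FakeGenerator.run_async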
New VCR cassette (test fixture)

Lines changed: 55 additions & 0 deletions

@@ -0,0 +1,55 @@
+interactions:
+- request:
+    body: '{"messages":[{"role":"user","content":"Search for Python tutorials"}],"model":"gpt-4o-mini","n":1,"stream":false,"tools":[{"type":"function","function":{"name":"search","description":"Search
+      documents","parameters":{"type":"object","properties":{"query":{"type":"string"},"user_context":{"type":"string"}},"required":["query"]}}}]}'
+    headers: {}
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: "{\n \"id\": \"chatcmpl-CWletHyGzapkvp0MG02ee7Jwn1x7B\",\n \"object\":
+        \"chat.completion\",\n \"created\": 1761925731,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
+        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
+        \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
+        \ \"id\": \"call_BjOwGgWvcqqtxtVBYWjhPRfu\",\n \"type\":
+        \"function\",\n \"function\": {\n \"name\": \"search\",\n
+        \ \"arguments\": \"{\\\"query\\\":\\\"Python tutorials\\\"}\"\n
+        \ }\n }\n ],\n \"refusal\": null,\n \"annotations\":
+        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
+        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 48,\n \"completion_tokens\":
+        14,\n \"total_tokens\": 62,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
+        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
+        \"default\",\n \"system_fingerprint\": \"fp_51db84afab\"\n}\n"
+    headers: {}
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"messages":[{"role":"user","content":"Search for Python tutorials"},{"role":"assistant","tool_calls":[{"type":"function","function":{"name":"search","arguments":"{\"query\":
+      \"Python tutorials\"}"},"id":"call_BjOwGgWvcqqtxtVBYWjhPRfu"}]},{"role":"tool","content":"{''results'':
+      [\"Found results for ''Python tutorials'' (user: Alice)\"]}","tool_call_id":"call_BjOwGgWvcqqtxtVBYWjhPRfu"}],"model":"gpt-4o-mini","n":1,"stream":false,"tools":[{"type":"function","function":{"name":"search","description":"Search
+      documents","parameters":{"type":"object","properties":{"query":{"type":"string"},"user_context":{"type":"string"}},"required":["query"]}}}]}'
+    headers: {}
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: "{\n \"id\": \"chatcmpl-CWleuO1zD8b4RnjEM1j1FOmAKwpQy\",\n \"object\":
+        \"chat.completion\",\n \"created\": 1761925732,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
+        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
+        \"assistant\",\n \"content\": \"I found some results for \\\"Python
+        tutorials.\\\" Would you like more specific information about them?\",\n \"refusal\":
+        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
+        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
+        87,\n \"completion_tokens\": 19,\n \"total_tokens\": 106,\n \"prompt_tokens_details\":
+        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
+        \"default\",\n \"system_fingerprint\": \"fp_51db84afab\"\n}\n"
+    headers: {}
+    status:
+      code: 200
+      message: OK
+version: 1

python/instrumentation/openinference-instrumentation-haystack/tests/openinference/haystack/test_instrumentor.py

Lines changed: 83 additions & 15 deletions
@@ -157,7 +157,7 @@ async def test_async_pipeline_with_chat_prompt_builder_and_chat_generator_produc
     span = spans[1]
     assert span.status.is_ok
     assert not span.events
-    assert span.name == "OpenAIChatGenerator.run"
+    assert span.name == "OpenAIChatGenerator.run_async"
     attributes = dict(span.attributes or {})
     assert attributes.pop(OPENINFERENCE_SPAN_KIND) == LLM
     assert attributes.pop(INPUT_MIME_TYPE) == JSON
@@ -393,7 +393,7 @@ async def test_haystack_instrumentation_async_pipeline_filtering(
     spans = in_memory_span_exporter.get_finished_spans()

     assert [span.name for span in spans] == [
-        "InMemoryBM25Retriever.run",
+        "InMemoryBM25Retriever.run_async",
         "AsyncPipeline.run_async_generator",
         "AsyncPipeline.run_async",
     ]
@@ -539,13 +539,13 @@ def test_async_pipeline_tool_calling_llm_span_has_expected_attributes(
     spans = in_memory_span_exporter.get_finished_spans()
     assert len(spans) == 4
     assert [span.name for span in spans] == [
-        "OpenAIChatGenerator.run",
+        "OpenAIChatGenerator.run_async",
         "AsyncPipeline.run_async_generator",
         "AsyncPipeline.run_async",
         "AsyncPipeline.run",
     ]
     span = spans[0]
-    assert span.name == "OpenAIChatGenerator.run"
+    assert span.name == "OpenAIChatGenerator.run_async"
     assert span.status.is_ok
     assert not span.events
     attributes = dict(span.attributes or {})
@@ -701,13 +701,13 @@ async def test_async_pipeline_openai_chat_generator_llm_span_has_expected_attrib
     spans = in_memory_span_exporter.get_finished_spans()
     assert len(spans) == 2
     assert [span.name for span in spans] == [
-        "OpenAIChatGenerator.run",
+        "OpenAIChatGenerator.run_async",
         "AsyncPipeline.run_async_generator",
     ]
     span = spans[0]
     assert span.status.is_ok
     assert not span.events
-    assert span.name == "OpenAIChatGenerator.run"
+    assert span.name == "OpenAIChatGenerator.run_async"
     attributes = dict(span.attributes or {})
     assert attributes.pop(OPENINFERENCE_SPAN_KIND) == "LLM"
     assert (
@@ -1182,16 +1182,21 @@ def test_pipeline_and_component_spans_contain_context_attributes(
     assert attributes.get(LLM_PROMPT_TEMPLATE_VARIABLES, '{"var_name": "var-value"}')


+@pytest.mark.parametrize("use_async", [False, True])
 @pytest.mark.vcr(
     decode_compressed_response=True,
     before_record_request=remove_all_vcr_request_headers,
     before_record_response=remove_all_vcr_response_headers,
+    record_mode="once",
 )
-def test_agent_run_component_spans(
+async def test_agent_run_component_spans(
     openai_api_key: str,
     in_memory_span_exporter: InMemorySpanExporter,
     setup_haystack_instrumentation: Any,
+    use_async: bool,
 ) -> None:
+    run_method = "run_async" if use_async else "run"
+
     def search_documents(query: str, user_context: str) -> Dict[str, Any]:
         """Search documents using query and user context."""
         return {"results": [f"Found results for '{query}' (user: {user_context})"]}
@@ -1212,10 +1217,14 @@ def search_documents(query: str, user_context: str) -> Dict[str, Any]:
         tools=[search_tool],
         state_schema={"user_name": {"type": str}, "search_results": {"type": list}},
     )
-
-    result = agent.run(
-        messages=[ChatMessage.from_user("Search for Python tutorials")], user_name="Alice"
-    )
+    if use_async:
+        result = await agent.run_async(
+            messages=[ChatMessage.from_user("Search for Python tutorials")], user_name="Alice"
+        )
+    else:
+        result = agent.run(
+            messages=[ChatMessage.from_user("Search for Python tutorials")], user_name="Alice"
+        )
     last_message = result["last_message"]
     assert last_message.role.name == "ASSISTANT"
     assert last_message.text == (
@@ -1225,7 +1234,7 @@ def search_documents(query: str, user_context: str) -> Dict[str, Any]:
     spans = in_memory_span_exporter.get_finished_spans()
     assert len(spans) == 4
     openai_span = spans[0]
-    assert openai_span.name == "OpenAIChatGenerator.run"
+    assert openai_span.name == f"OpenAIChatGenerator.{run_method}"
     assert openai_span.status.is_ok
     attributes = dict(openai_span.attributes or {})
     assert attributes.pop(OPENINFERENCE_SPAN_KIND) == "LLM"
@@ -1253,7 +1262,7 @@ def search_documents(query: str, user_context: str) -> Dict[str, Any]:
     assert prompt_tokens + completion_tokens == total_tokens
     assert not attributes
     tool_invoker_span = spans[1]
-    assert tool_invoker_span.name == "ToolInvoker.run"
+    assert tool_invoker_span.name == f"ToolInvoker.{run_method}"
     assert tool_invoker_span.status.is_ok
     attributes = dict(tool_invoker_span.attributes or {})
     assert attributes.pop(OPENINFERENCE_SPAN_KIND) == "CHAIN"
@@ -1263,7 +1272,7 @@ def search_documents(query: str, user_context: str) -> Dict[str, Any]:
     assert isinstance(attributes.pop(OUTPUT_VALUE), str)
     assert not attributes
     openai_span = spans[2]
-    assert openai_span.name == "OpenAIChatGenerator.run"
+    assert openai_span.name == f"OpenAIChatGenerator.{run_method}"
     assert openai_span.status.is_ok
     attributes = dict(openai_span.attributes or {})
     assert attributes.pop(OPENINFERENCE_SPAN_KIND) == "LLM"
@@ -1287,7 +1296,7 @@ def search_documents(query: str, user_context: str) -> Dict[str, Any]:
     assert prompt_tokens + completion_tokens == total_tokens
     assert not attributes
     agent_run_span = spans[3]  # root span
-    assert agent_run_span.name == "Agent.run"
+    assert agent_run_span.name == f"Agent.{run_method}"
     assert agent_run_span.status.is_ok
     attributes = dict(agent_run_span.attributes or {})
     assert attributes.pop(OPENINFERENCE_SPAN_KIND) == "CHAIN"
@@ -1355,6 +1364,65 @@ def test_individual_component_without_child_components(
     assert not attributes


+@pytest.mark.vcr(
+    decode_compressed_response=True,
+    before_record_request=remove_all_vcr_request_headers,
+    before_record_response=remove_all_vcr_response_headers,
+)
+async def test_individual_component_run_async_without_child_components(
+    in_memory_span_exporter: InMemorySpanExporter,
+    setup_haystack_instrumentation: Any,
+) -> None:
+    document_store = InMemoryDocumentStore()
+    documents = [
+        Document(content="There are over 7,000 languages spoken around the world today."),
+        Document(
+            content="Elephants have been observed to behave in a way that indicates "
+            "a high level of self-awareness, such as recognizing themselves "
+            "in mirrors."
+        ),
+        Document(
+            content="In certain parts of the world, like the Maldives, Puerto Rico, "
+            "and San Diego, you can witness the phenomenon of bioluminescent"
+            " waves."
+        ),
+    ]
+    document_store.write_documents(documents=documents)
+
+    retriever = InMemoryBM25Retriever(document_store=document_store)
+    results = await retriever.run_async(
+        query="How many languages are spoken around the world today?"
+    )
+    assert results.get("documents") is not None
+    assert len(results["documents"]) == 3
+    for document in results["documents"]:
+        assert isinstance(document, Document)
+        assert document.id is not None
+        assert document.content_type == "text"
+        assert isinstance(document.content, str)
+    spans = in_memory_span_exporter.get_finished_spans()
+    assert len(spans) == 1
+    retriever_span = spans[0]
+    assert retriever_span.name == "InMemoryBM25Retriever.run_async"
+    assert retriever_span.status.is_ok
+    attributes = dict(retriever_span.attributes or {})
+    assert attributes.pop(OPENINFERENCE_SPAN_KIND) == "RETRIEVER"
+    assert attributes.pop(INPUT_MIME_TYPE) == JSON
+    assert isinstance(attributes.pop(INPUT_VALUE), str)
+    assert attributes.pop(OUTPUT_MIME_TYPE) == JSON
+    assert isinstance(attributes.pop(OUTPUT_VALUE), str)
+    for i, document in enumerate(results["documents"]):
+        prefix = f"{RETRIEVAL_DOCUMENTS}.{i}"
+        assert isinstance(content := attributes.pop(f"{prefix}.{DOCUMENT_CONTENT}"), str)
+        assert content == document.content
+        assert isinstance(doc_id := attributes.pop(f"{prefix}.{DOCUMENT_ID}"), str)
+        assert doc_id == document.id
+        assert isinstance(score := attributes.pop(f"{prefix}.{DOCUMENT_SCORE}"), float)
+        assert score == document.score
+        assert isinstance(attributes.pop(f"{prefix}.{DOCUMENT_METADATA}"), str)
+    assert not attributes
+
+
 @pytest.fixture
 def openai_api_key(monkeypatch: pytest.MonkeyPatch) -> None:
     monkeypatch.setenv("OPENAI_API_KEY", "sk-")
