Skip to content

Commit d5c54e3

Browse files
authored
Record logger name as the instrumentation scope name (#4208)
1 parent d1904b9 commit d5c54e3

File tree

5 files changed

+231
-17
lines changed

5 files changed

+231
-17
lines changed

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
2323
([#4206](https://github.com/open-telemetry/opentelemetry-python/pull/4206))
2424
- Update environment variable descriptions to match signal
2525
([#4222](https://github.com/open-telemetry/opentelemetry-python/pull/4222))
26+
- Record logger name as the instrumentation scope name
27+
([#4208](https://github.com/open-telemetry/opentelemetry-python/pull/4208))
2628

2729
## Version 1.27.0/0.48b0 (2024-08-28)
2830

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
import logging
2+
3+
import pytest
4+
5+
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
6+
from opentelemetry.sdk._logs.export import (
7+
InMemoryLogExporter,
8+
SimpleLogRecordProcessor,
9+
)
10+
11+
12+
def _set_up_logging_handler(level):
    """Build a LoggingHandler backed by a fresh provider and in-memory exporter."""
    provider = LoggerProvider()
    provider.add_log_record_processor(
        SimpleLogRecordProcessor(exporter=InMemoryLogExporter())
    )
    return LoggingHandler(level=level, logger_provider=provider)
19+
20+
21+
def _create_logger(handler, name):
22+
logger = logging.getLogger(name)
23+
logger.addHandler(handler)
24+
return logger
25+
26+
27+
@pytest.mark.parametrize("num_loggers", [1, 10, 100, 1000])
def test_simple_get_logger_different_names(benchmark, num_loggers):
    """Benchmark emitting log records through loggers with distinct names.

    Exercises the per-record-name logger lookup path in LoggingHandler.
    """
    handler = _set_up_logging_handler(level=logging.DEBUG)
    # Fix: f-strings already produce str; the redundant str(...) wrapper is dropped.
    loggers = [
        _create_logger(handler, f"logger_{i}") for i in range(num_loggers)
    ]

    def benchmark_get_logger():
        # Cycle through all loggers so each name is hit repeatedly.
        for index in range(1000):
            loggers[index % num_loggers].warning("test message")

    benchmark(benchmark_get_logger)

opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py

Lines changed: 48 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
import traceback
2222
import warnings
2323
from os import environ
24+
from threading import Lock
2425
from time import time_ns
2526
from typing import Any, Callable, Optional, Tuple, Union # noqa
2627

@@ -471,9 +472,6 @@ def __init__(
471472
) -> None:
472473
super().__init__(level=level)
473474
self._logger_provider = logger_provider or get_logger_provider()
474-
self._logger = get_logger(
475-
__name__, logger_provider=self._logger_provider
476-
)
477475

478476
@staticmethod
479477
def _get_attributes(record: logging.LogRecord) -> Attributes:
@@ -558,6 +556,7 @@ def _translate(self, record: logging.LogRecord) -> LogRecord:
558556
"WARN" if record.levelname == "WARNING" else record.levelname
559557
)
560558

559+
logger = get_logger(record.name, logger_provider=self._logger_provider)
561560
return LogRecord(
562561
timestamp=timestamp,
563562
observed_timestamp=observered_timestamp,
@@ -567,7 +566,7 @@ def _translate(self, record: logging.LogRecord) -> LogRecord:
567566
severity_text=level_name,
568567
severity_number=severity_number,
569568
body=body,
570-
resource=self._logger.resource,
569+
resource=logger.resource,
571570
attributes=attributes,
572571
)
573572

@@ -577,14 +576,17 @@ def emit(self, record: logging.LogRecord) -> None:
577576
578577
The record is translated to OTel format, and then sent across the pipeline.
579578
"""
580-
if not isinstance(self._logger, NoOpLogger):
581-
self._logger.emit(self._translate(record))
579+
logger = get_logger(record.name, logger_provider=self._logger_provider)
580+
if not isinstance(logger, NoOpLogger):
581+
logger.emit(self._translate(record))
582582

583583
def flush(self) -> None:
584584
"""
585-
Flushes the logging output. Skip flushing if logger is NoOp.
585+
Flushes the logging output. Skip flushing if logging_provider has no force_flush method.
586586
"""
587-
if not isinstance(self._logger, NoOpLogger):
587+
if hasattr(self._logger_provider, "force_flush") and callable(
588+
self._logger_provider.force_flush
589+
):
588590
self._logger_provider.force_flush()
589591

590592

@@ -642,26 +644,20 @@ def __init__(
642644
self._at_exit_handler = None
643645
if shutdown_on_exit:
644646
self._at_exit_handler = atexit.register(self.shutdown)
647+
self._logger_cache = {}
648+
self._logger_cache_lock = Lock()
645649

646650
@property
def resource(self):
    """Read-only accessor for the provider's associated Resource."""
    return self._resource
649653

650-
def get_logger(
654+
def _get_logger_no_cache(
651655
self,
652656
name: str,
653657
version: Optional[str] = None,
654658
schema_url: Optional[str] = None,
655659
attributes: Optional[Attributes] = None,
656660
) -> Logger:
657-
if self._disabled:
658-
_logger.warning("SDK is disabled.")
659-
return NoOpLogger(
660-
name,
661-
version=version,
662-
schema_url=schema_url,
663-
attributes=attributes,
664-
)
665661
return Logger(
666662
self._resource,
667663
self._multi_log_record_processor,
@@ -673,6 +669,41 @@ def get_logger(
673669
),
674670
)
675671

672+
def _get_logger_cached(
673+
self,
674+
name: str,
675+
version: Optional[str] = None,
676+
schema_url: Optional[str] = None,
677+
) -> Logger:
678+
with self._logger_cache_lock:
679+
key = (name, version, schema_url)
680+
if key in self._logger_cache:
681+
return self._logger_cache[key]
682+
683+
self._logger_cache[key] = self._get_logger_no_cache(
684+
name, version, schema_url
685+
)
686+
return self._logger_cache[key]
687+
688+
def get_logger(
    self,
    name: str,
    version: Optional[str] = None,
    schema_url: Optional[str] = None,
    attributes: Optional[Attributes] = None,
) -> Logger:
    """Return a Logger for the given instrumentation scope.

    Requests without attributes are served from the provider cache; the
    cache key covers only (name, version, schema_url), so attribute-carrying
    requests always build a fresh Logger.
    """
    if not self._disabled:
        if attributes is None:
            return self._get_logger_cached(name, version, schema_url)
        return self._get_logger_no_cache(name, version, schema_url, attributes)
    # Disabled SDK: warn once per call and hand back a no-op implementation.
    _logger.warning("SDK is disabled.")
    return NoOpLogger(
        name,
        version=version,
        schema_url=schema_url,
        attributes=attributes,
    )
706+
676707
def add_log_record_processor(
677708
self, log_record_processor: LogRecordProcessor
678709
):

opentelemetry-sdk/tests/logs/test_export.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,9 @@ def test_simple_log_record_processor_default_level(self):
7171
self.assertEqual(
7272
warning_log_record.severity_number, SeverityNumber.WARN
7373
)
74+
self.assertEqual(
75+
finished_logs[0].instrumentation_scope.name, "default_level"
76+
)
7477

7578
def test_simple_log_record_processor_custom_level(self):
7679
exporter = InMemoryLogExporter()
@@ -104,6 +107,12 @@ def test_simple_log_record_processor_custom_level(self):
104107
self.assertEqual(
105108
fatal_log_record.severity_number, SeverityNumber.FATAL
106109
)
110+
self.assertEqual(
111+
finished_logs[0].instrumentation_scope.name, "custom_level"
112+
)
113+
self.assertEqual(
114+
finished_logs[1].instrumentation_scope.name, "custom_level"
115+
)
107116

108117
def test_simple_log_record_processor_trace_correlation(self):
109118
exporter = InMemoryLogExporter()
@@ -129,6 +138,9 @@ def test_simple_log_record_processor_trace_correlation(self):
129138
self.assertEqual(
130139
log_record.trace_flags, INVALID_SPAN_CONTEXT.trace_flags
131140
)
141+
self.assertEqual(
142+
finished_logs[0].instrumentation_scope.name, "trace_correlation"
143+
)
132144
exporter.clear()
133145

134146
tracer = trace.TracerProvider().get_tracer(__name__)
@@ -140,6 +152,10 @@ def test_simple_log_record_processor_trace_correlation(self):
140152
self.assertEqual(log_record.body, "Critical message within span")
141153
self.assertEqual(log_record.severity_text, "CRITICAL")
142154
self.assertEqual(log_record.severity_number, SeverityNumber.FATAL)
155+
self.assertEqual(
156+
finished_logs[0].instrumentation_scope.name,
157+
"trace_correlation",
158+
)
143159
span_context = span.get_span_context()
144160
self.assertEqual(log_record.trace_id, span_context.trace_id)
145161
self.assertEqual(log_record.span_id, span_context.span_id)
@@ -166,6 +182,9 @@ def test_simple_log_record_processor_shutdown(self):
166182
self.assertEqual(
167183
warning_log_record.severity_number, SeverityNumber.WARN
168184
)
185+
self.assertEqual(
186+
finished_logs[0].instrumentation_scope.name, "shutdown"
187+
)
169188
exporter.clear()
170189
logger_provider.shutdown()
171190
with self.assertLogs(level=logging.WARNING):
@@ -206,6 +225,10 @@ def test_simple_log_record_processor_different_msg_types(self):
206225
for item in finished_logs
207226
]
208227
self.assertEqual(expected, emitted)
228+
for item in finished_logs:
229+
self.assertEqual(
230+
item.instrumentation_scope.name, "different_msg_types"
231+
)
209232

210233
def test_simple_log_record_processor_different_msg_types_with_formatter(
211234
self,
@@ -428,6 +451,8 @@ def test_shutdown(self):
428451
for item in finished_logs
429452
]
430453
self.assertEqual(expected, emitted)
454+
for item in finished_logs:
455+
self.assertEqual(item.instrumentation_scope.name, "shutdown")
431456

432457
def test_force_flush(self):
433458
exporter = InMemoryLogExporter()
@@ -447,6 +472,9 @@ def test_force_flush(self):
447472
log_record = finished_logs[0].log_record
448473
self.assertEqual(log_record.body, "Earth is burning")
449474
self.assertEqual(log_record.severity_number, SeverityNumber.FATAL)
475+
self.assertEqual(
476+
finished_logs[0].instrumentation_scope.name, "force_flush"
477+
)
450478

451479
def test_log_record_processor_too_many_logs(self):
452480
exporter = InMemoryLogExporter()
@@ -465,6 +493,8 @@ def test_log_record_processor_too_many_logs(self):
465493
self.assertTrue(log_record_processor.force_flush())
466494
finised_logs = exporter.get_finished_logs()
467495
self.assertEqual(len(finised_logs), 1000)
496+
for item in finised_logs:
497+
self.assertEqual(item.instrumentation_scope.name, "many_logs")
468498

469499
def test_with_multiple_threads(self):
470500
exporter = InMemoryLogExporter()
@@ -492,6 +522,8 @@ def bulk_log_and_flush(num_logs):
492522

493523
finished_logs = exporter.get_finished_logs()
494524
self.assertEqual(len(finished_logs), 2415)
525+
for item in finished_logs:
526+
self.assertEqual(item.instrumentation_scope.name, "threads")
495527

496528
@unittest.skipUnless(
497529
hasattr(os, "fork"),
Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
import logging
2+
import unittest
3+
4+
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
5+
from opentelemetry.sdk._logs.export import (
6+
InMemoryLogExporter,
7+
SimpleLogRecordProcessor,
8+
)
9+
10+
11+
def set_up_logging_handler(level):
    """Return (handler, provider): a LoggingHandler over a fresh in-memory pipeline."""
    provider = LoggerProvider()
    provider.add_log_record_processor(
        SimpleLogRecordProcessor(exporter=InMemoryLogExporter())
    )
    return LoggingHandler(level=level, logger_provider=provider), provider
18+
19+
20+
def create_logger(handler, name):
    """Fetch the stdlib logger called *name* and attach *handler* to it."""
    result = logging.getLogger(name)
    result.addHandler(handler)
    return result
24+
25+
26+
class TestLoggerProviderCache(unittest.TestCase):
    """Verify LoggerProvider's lazy per-(name, version, schema_url) logger cache."""

    def test_get_logger_single_handler(self):
        handler, logger_provider = set_up_logging_handler(level=logging.DEBUG)
        # pylint: disable=protected-access
        cache = logger_provider._logger_cache
        logger = create_logger(handler, "test_logger")

        # Nothing is cached until the first record is actually emitted.
        self.assertEqual(len(cache), 0)

        with self.assertLogs(level=logging.WARNING):
            logger.warning("test message")
        self.assertEqual(len(cache), 1)

        # Repeated emits through the same name reuse the single cached entry.
        with self.assertLogs(level=logging.WARNING):
            for _ in range(100):
                logger.warning("test message")
        self.assertEqual(len(cache), 1)

    def test_get_logger_multiple_loggers(self):
        handler, logger_provider = set_up_logging_handler(level=logging.DEBUG)
        # pylint: disable=protected-access
        cache = logger_provider._logger_cache

        count = 10
        loggers = [create_logger(handler, str(i)) for i in range(count)]

        # Cache fills lazily: still empty before any record is emitted.
        self.assertEqual(len(cache), 0)

        with self.assertLogs(level=logging.WARNING):
            for lg in loggers:
                lg.warning("test message")
        self.assertEqual(len(cache), count)

        # Further emits through the same names add no new entries.
        with self.assertLogs(level=logging.WARNING):
            for _ in range(100):
                for lg in loggers:
                    lg.warning("test message")
        self.assertEqual(len(cache), count)

    def test_provider_get_logger_no_cache(self):
        _, logger_provider = set_up_logging_handler(level=logging.DEBUG)

        logger_provider.get_logger(
            name="test_logger",
            version="version",
            schema_url="schema_url",
            attributes={"key": "value"},
        )

        # Attribute-carrying requests bypass the cache entirely.
        # pylint: disable=protected-access
        self.assertEqual(len(logger_provider._logger_cache), 0)

    def test_provider_get_logger_cached(self):
        _, logger_provider = set_up_logging_handler(level=logging.DEBUG)
        # pylint: disable=protected-access
        cache = logger_provider._logger_cache

        # The same (name, version, schema_url) triple maps to one cache entry,
        # no matter how many times it is requested.
        for _ in range(2):
            logger_provider.get_logger(
                name="test_logger",
                version="version",
                schema_url="schema_url",
            )
            self.assertEqual(len(cache), 1)

0 commit comments

Comments
 (0)