Commit 4971007

openai: Use semconv-1.31.0 (#77)
* Use "gen_ai.output.type" instead of deprecated "gen_ai.openai.request.response_format" attribute. * Use json instead of json_schema and json_object for gen_ai.output.type attribute value * Bump schema version number
1 parent 6030284 commit 4971007
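
As a quick before-and-after illustration of what this commit changes on a recorded chat span (attribute keys, values, and schema URLs are taken from the diffs below; the snippet itself is a sketch, not code from the repository):

# Sketch only: span attributes for a chat request using a JSON response_format.

# Before (semconv 1.28.0): the deprecated OpenAI-specific attribute carried the
# raw response_format type.
attributes_before = {"gen_ai.openai.request.response_format": "json_schema"}

# After (semconv 1.31.0): the generic output-type attribute is used, and both
# "json_object" and "json_schema" map to "json".
attributes_after = {"gen_ai.output.type": "json"}

# The instrumentation scope's schema_url also moves from
# "https://opentelemetry.io/schemas/1.28.0" to "https://opentelemetry.io/schemas/1.31.0".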

File tree

4 files changed: +20 -17 lines changed

instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/__init__.py

+2 -2

@@ -87,14 +87,14 @@ def _instrument(self, **kwargs):
             __name__,
             __version__,
             tracer_provider,
-            schema_url=Schemas.V1_28_0.value,
+            schema_url=Schemas.V1_31_0.value,
         )
         meter_provider = kwargs.get("meter_provider")
         self.meter = get_meter(
             __name__,
             __version__,
             meter_provider,
-            schema_url=Schemas.V1_28_0.value,
+            schema_url=Schemas.V1_31_0.value,
         )
         event_logger_provider = kwargs.get("event_logger_provider")
         self.event_logger = get_event_logger(__name__, event_logger_provider)
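
For context, a minimal sketch of how a schema_url ends up on an instrumentation scope (the scope name and version below are placeholders, not the instrumentation's); Schemas.V1_31_0.value resolves to the 1.31.0 schema URL asserted in the tests further down, assuming an opentelemetry-semantic-conventions release that ships that member:

# Minimal sketch, not the instrumentation's code: attaching the bumped schema
# version to tracer and meter instrumentation scopes.
from opentelemetry.metrics import get_meter
from opentelemetry.semconv.schemas import Schemas
from opentelemetry.trace import get_tracer

SCOPE_NAME = "example.scope"   # placeholder, not the real scope name
SCOPE_VERSION = "0.0.1"        # placeholder version

tracer = get_tracer(SCOPE_NAME, SCOPE_VERSION, schema_url=Schemas.V1_31_0.value)
meter = get_meter(SCOPE_NAME, SCOPE_VERSION, schema_url=Schemas.V1_31_0.value)

# Both scopes now report "https://opentelemetry.io/schemas/1.31.0" as their schema_url.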

instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py

+7 -4

@@ -20,10 +20,10 @@

 from opentelemetry._events import Event, EventLogger
 from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
-    GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
     GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
     GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
     GEN_AI_OPERATION_NAME,
+    GEN_AI_OUTPUT_TYPE,
     GEN_AI_REQUEST_CHOICE_COUNT,
     GEN_AI_REQUEST_FREQUENCY_PENALTY,
     GEN_AI_REQUEST_MAX_TOKENS,

@@ -166,13 +166,16 @@ def _is_set(value):
         # response_format may be string or object with a string in the `type` key
         if isinstance(response_format, Mapping):
             if _is_set(response_format_type := response_format.get("type")):
-                span_attributes[GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] = response_format_type
+                if response_format_type in ("json_object", "json_schema"):
+                    span_attributes[GEN_AI_OUTPUT_TYPE] = "json"
+                else:
+                    span_attributes[GEN_AI_OUTPUT_TYPE] = response_format_type
         elif isinstance(response_format, str):
-            span_attributes[GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] = response_format
+            span_attributes[GEN_AI_OUTPUT_TYPE] = response_format
         else:
             # Assume structured output lazily parsed to a schema via type_to_response_format_param or similar.
             # e.g. pydantic._internal._model_construction.ModelMetaclass
-            span_attributes[GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] = "json_schema"
+            span_attributes[GEN_AI_OUTPUT_TYPE] = "json"

     return span_attributes
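
A self-contained sketch of the mapping introduced above (the function name is mine, and the unset/None handling is simplified relative to the package's _is_set logic):

# Hedged sketch of the response_format -> gen_ai.output.type mapping; illustrative only.
from collections.abc import Mapping

GEN_AI_OUTPUT_TYPE = "gen_ai.output.type"


def output_type_attributes(response_format) -> dict:
    attributes = {}
    if isinstance(response_format, Mapping):
        response_format_type = response_format.get("type")
        if response_format_type in ("json_object", "json_schema"):
            attributes[GEN_AI_OUTPUT_TYPE] = "json"
        elif response_format_type is not None:
            attributes[GEN_AI_OUTPUT_TYPE] = response_format_type
    elif isinstance(response_format, str):
        attributes[GEN_AI_OUTPUT_TYPE] = response_format
    elif response_format is not None:
        # e.g. a Pydantic model class passed to the beta parse() API: treated as JSON output.
        attributes[GEN_AI_OUTPUT_TYPE] = "json"
    return attributes


assert output_type_attributes("text") == {GEN_AI_OUTPUT_TYPE: "text"}
assert output_type_attributes({"type": "json_object"}) == {GEN_AI_OUTPUT_TYPE: "json"}
assert output_type_attributes({"type": "json_schema", "json_schema": {}}) == {GEN_AI_OUTPUT_TYPE: "json"}
assert output_type_attributes(object) == {GEN_AI_OUTPUT_TYPE: "json"}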

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_beta_chat_completions.py

+6 -6

@@ -28,10 +28,10 @@
 from opentelemetry._logs import LogRecord
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
-    GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
     GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
     GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
     GEN_AI_OPERATION_NAME,
+    GEN_AI_OUTPUT_TYPE,
     GEN_AI_REQUEST_CHOICE_COUNT,
     GEN_AI_REQUEST_FREQUENCY_PENALTY,
     GEN_AI_REQUEST_MAX_TOKENS,

@@ -248,7 +248,7 @@ def test_chat_all_the_client_options(default_openai_env, trace_exporter, metrics
     expected_attrs = {
         GEN_AI_REQUEST_SEED: 100,
         GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
-        GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
+        GEN_AI_OUTPUT_TYPE: "text",
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
         GEN_AI_OPERATION_NAME: "chat",
         GEN_AI_REQUEST_FREQUENCY_PENALTY: 0,

@@ -1502,14 +1502,14 @@ def test_chat_exported_schema_version(default_openai_env, trace_exporter, metric

     spans = trace_exporter.get_finished_spans()
     (span,) = spans
-    assert span.instrumentation_scope.schema_url == "https://opentelemetry.io/schemas/1.28.0"
+    assert span.instrumentation_scope.schema_url == "https://opentelemetry.io/schemas/1.31.0"

     metrics_data = metrics_reader.get_metrics_data()
     resource_metrics = metrics_data.resource_metrics

     for metrics in resource_metrics:
         for scope_metrics in metrics.scope_metrics:
-            assert scope_metrics.schema_url == "https://opentelemetry.io/schemas/1.28.0"
+            assert scope_metrics.schema_url == "https://opentelemetry.io/schemas/1.31.0"


 @pytest.mark.skipif(OPENAI_VERSION < (1, 40, 0), reason="beta completions added in 1.40.0")

@@ -1545,7 +1545,7 @@ def test_parse_response_format_json_object_with_capture_message_content(
     address, port = address_and_port(client)
     assert dict(span.attributes) == {
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
-        GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "json_object",
+        GEN_AI_OUTPUT_TYPE: "json",
         GEN_AI_OPERATION_NAME: "chat",
         GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
         GEN_AI_SYSTEM: "openai",

@@ -1620,7 +1620,7 @@ def test_parse_response_format_structured_output_with_capture_message_content(
     address, port = address_and_port(client)
     assert dict(span.attributes) == {
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
-        GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "json_schema",
+        GEN_AI_OUTPUT_TYPE: "json",
         GEN_AI_OPERATION_NAME: "chat",
         GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
         GEN_AI_SYSTEM: "openai",

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py

+5 -5

@@ -27,10 +27,10 @@
 from opentelemetry._logs import LogRecord
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
-    GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
     GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
     GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
     GEN_AI_OPERATION_NAME,
+    GEN_AI_OUTPUT_TYPE,
     GEN_AI_REQUEST_CHOICE_COUNT,
     GEN_AI_REQUEST_FREQUENCY_PENALTY,
     GEN_AI_REQUEST_MAX_TOKENS,

@@ -332,7 +332,7 @@ def test_chat_all_the_client_options(default_openai_env, trace_exporter, metrics
     expected_attrs = {
         GEN_AI_REQUEST_SEED: 100,
         GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
-        GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
+        GEN_AI_OUTPUT_TYPE: "text",
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
         GEN_AI_OPERATION_NAME: "chat",
         GEN_AI_REQUEST_FREQUENCY_PENALTY: 0,

@@ -1204,7 +1204,7 @@ def test_chat_stream_all_the_client_options(default_openai_env, trace_exporter,
     address, port = address_and_port(client)
     expected_attrs = {
         GEN_AI_REQUEST_SEED: 100,
-        GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
+        GEN_AI_OUTPUT_TYPE: "text",
         GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
         GEN_AI_OPERATION_NAME: "chat",

@@ -2444,14 +2444,14 @@ def test_chat_exported_schema_version(default_openai_env, trace_exporter, metric

     spans = trace_exporter.get_finished_spans()
     (span,) = spans
-    assert span.instrumentation_scope.schema_url == "https://opentelemetry.io/schemas/1.28.0"
+    assert span.instrumentation_scope.schema_url == "https://opentelemetry.io/schemas/1.31.0"

     metrics_data = metrics_reader.get_metrics_data()
     resource_metrics = metrics_data.resource_metrics

     for metrics in resource_metrics:
         for scope_metrics in metrics.scope_metrics:
-            assert scope_metrics.schema_url == "https://opentelemetry.io/schemas/1.28.0"
+            assert scope_metrics.schema_url == "https://opentelemetry.io/schemas/1.31.0"


 @dataclass
