@@ -1103,6 +1103,62 @@ def test_chat_stream(default_openai_env, trace_exporter, metrics_reader, logs_ex
1103
1103
)
1104
1104
1105
1105
1106
@pytest.mark.vcr()
def test_chat_stream_with_context_manager(default_openai_env, trace_exporter, metrics_reader, logs_exporter):
    """Streaming chat consumed through a ``with`` block still produces one span, two log events and a duration metric."""
    client = openai.OpenAI()

    user_prompt = {
        "role": "user",
        "content": TEST_CHAT_INPUT,
    }

    # Exhaust the stream from inside a context manager; closing the stream on
    # __exit__ must not prevent the instrumentation from ending the span.
    with client.chat.completions.create(model=TEST_CHAT_MODEL, messages=[user_prompt], stream=True) as chat_completion:
        pieces = [chunk.choices[0].delta.content or "" for chunk in chat_completion if chunk.choices]
        assert "".join(pieces) == "South Atlantic Ocean."

    spans = trace_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    # Span identity: client-kind chat span, no error status recorded.
    assert span.name == f"chat {TEST_CHAT_MODEL}"
    assert span.kind == SpanKind.CLIENT
    assert span.status.status_code == StatusCode.UNSET

    address, port = address_and_port(client)
    expected_attributes = {
        GEN_AI_OPERATION_NAME: "chat",
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_SYSTEM: "openai",
        GEN_AI_RESPONSE_ID: "chatcmpl-BOja7e365tj5upRjLFinadEB8ZoDL",
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
        GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
        SERVER_ADDRESS: address,
        SERVER_PORT: port,
        GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
    }
    assert dict(span.attributes) == expected_attributes

    # Two log events: the captured user message and the stop choice.
    logs = logs_exporter.get_finished_logs()
    assert len(logs) == 2
    user_message, choice = logrecords_from_logs(logs)
    assert dict(user_message.attributes) == {"gen_ai.system": "openai", "event.name": "gen_ai.user.message"}
    assert dict(user_message.body) == {}
    assert_stop_log_record(choice)

    # Exactly one metric is expected: the chat operation duration histogram.
    (operation_duration_metric,) = get_sorted_metrics(metrics_reader)
    metric_attributes = {
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
    }
    assert_operation_duration_metric(
        client, "chat", operation_duration_metric, attributes=metric_attributes, min_data_point=0.006761051714420319
    )
1161
+
1106
1162
@pytest .mark .skipif (OPENAI_VERSION < (1 , 8 , 0 ), reason = "LegacyAPIResponse available" )
1107
1163
@pytest .mark .vcr ()
1108
1164
def test_chat_stream_with_raw_response (default_openai_env , trace_exporter , metrics_reader , logs_exporter ):
@@ -2096,6 +2152,67 @@ async def test_chat_async_stream(default_openai_env, trace_exporter, metrics_rea
2096
2152
)
2097
2153
2098
2154
2155
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_chat_async_stream_with_context_manager(
    default_openai_env, trace_exporter, metrics_reader, logs_exporter
):
    """Async streaming chat consumed via ``async with`` still yields one span, two log events and a duration metric."""
    client = openai.AsyncOpenAI()

    user_prompt = {
        "role": "user",
        "content": TEST_CHAT_INPUT,
    }

    # Exhaust the async stream from inside a context manager; closing it on
    # __aexit__ must not prevent the instrumentation from ending the span.
    async with await client.chat.completions.create(
        model=TEST_CHAT_MODEL, messages=[user_prompt], stream=True
    ) as chat_completion:
        pieces = [chunk.choices[0].delta.content or "" async for chunk in chat_completion if chunk.choices]
        assert "".join(pieces) == "South Atlantic Ocean."

    spans = trace_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    # Span identity: client-kind chat span, no error status recorded.
    assert span.name == f"chat {TEST_CHAT_MODEL}"
    assert span.kind == SpanKind.CLIENT
    assert span.status.status_code == StatusCode.UNSET

    address, port = address_and_port(client)
    expected_attributes = {
        GEN_AI_OPERATION_NAME: "chat",
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_SYSTEM: "openai",
        GEN_AI_RESPONSE_ID: "chatcmpl-BOja7e365tj5upRjLFinadEB8ZoDL",
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
        GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
        SERVER_ADDRESS: address,
        SERVER_PORT: port,
        GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
    }
    assert dict(span.attributes) == expected_attributes

    # Two log events: the captured user message and the stop choice.
    logs = logs_exporter.get_finished_logs()
    assert len(logs) == 2
    user_message, choice = logrecords_from_logs(logs)
    assert dict(user_message.attributes) == {"gen_ai.system": "openai", "event.name": "gen_ai.user.message"}
    assert dict(user_message.body) == {}
    assert_stop_log_record(choice)

    # Exactly one metric is expected: the chat operation duration histogram.
    (operation_duration_metric,) = get_sorted_metrics(metrics_reader)
    metric_attributes = {
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
    }
    assert_operation_duration_metric(
        client, "chat", operation_duration_metric, attributes=metric_attributes, min_data_point=0.006761051714420319
    )
2215
+
2099
2216
@pytest .mark .skipif (OPENAI_VERSION < (1 , 8 , 0 ), reason = "LegacyAPIResponse available" )
2100
2217
@pytest .mark .vcr ()
2101
2218
@pytest .mark .asyncio
0 commit comments