@@ -90,6 +90,48 @@ class OpenAiDefinition:
         type="chat",
         sync=True,
     ),
+    OpenAiDefinition(
+        module="openai.resources.images",
+        object="Images",
+        method="generate",
+        type="image",
+        sync=True,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.images",
+        object="AsyncImages",
+        method="generate",
+        type="image",
+        sync=False,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.images",
+        object="Images",
+        method="edit",
+        type="image",
+        sync=True,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.images",
+        object="AsyncImages",
+        method="edit",
+        type="image",
+        sync=False,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.images",
+        object="Images",
+        method="create_variation",
+        type="image",
+        sync=True,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.images",
+        object="AsyncImages",
+        method="create_variation",
+        type="image",
+        sync=False,
+    ),
     OpenAiDefinition(
         module="openai.resources.completions",
         object="Completions",
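These six entries add sync and async definitions for the image endpoints (generate, edit, create_variation) alongside the existing chat, embedding, and completion ones. For orientation, a minimal sketch of how a definition table like this is typically applied; the wrapt loop, the OPENAI_METHODS_V1 list name, and the trace_image_call wrapper are assumptions about the surrounding module, not code from this diff.

# Sketch only: how entries like the ones above are usually consumed.
# OPENAI_METHODS_V1 and trace_image_call are assumed names, not part of this diff.
from wrapt import wrap_function_wrapper

def trace_image_call(wrapped, instance, args, kwargs):
    # Standard wrapt wrapper signature: call through and return the response;
    # the real integration records a Langfuse generation around this call.
    return wrapped(*args, **kwargs)

for resource in OPENAI_METHODS_V1:
    if resource.type == "image" and resource.sync:
        wrap_function_wrapper(
            resource.module,                          # "openai.resources.images"
            f"{resource.object}.{resource.method}",   # e.g. "Images.generate"
            trace_image_call,
        )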
@@ -354,9 +396,12 @@ def _extract_chat_response(kwargs: Any) -> Any:
 
 
 def _get_langfuse_data_from_kwargs(resource: OpenAiDefinition, kwargs: Any) -> Any:
-    default_name = (
-        "OpenAI-embedding" if resource.type == "embedding" else "OpenAI-generation"
-    )
+    if resource.type == "embedding":
+        default_name = "OpenAI-embedding"
+    elif resource.type == "image":
+        default_name = "OpenAI-image"
+    else:
+        default_name = "OpenAI-generation"
     name = kwargs.get("name", default_name)
 
     if name is None:
@@ -417,6 +462,12 @@ def _get_langfuse_data_from_kwargs(resource: OpenAiDefinition, kwargs: Any) -> Any:
         prompt = _extract_chat_prompt(kwargs)
     elif resource.type == "embedding":
         prompt = kwargs.get("input", None)
+    elif resource.type == "image":
+        # generate() and edit() accept a prompt, but create_variation() does not
+        if resource.method in ["generate", "edit"]:
+            prompt = kwargs.get("prompt", None)
+        else:
+            prompt = None  # create_variation uses an image input, not a text prompt
 
     parsed_temperature = (
         kwargs.get("temperature", 1)
@@ -479,6 +530,44 @@ def _get_langfuse_data_from_kwargs(resource: OpenAiDefinition, kwargs: Any) -> Any:
             modelParameters["dimensions"] = parsed_dimensions
         if parsed_encoding_format != "float":
             modelParameters["encoding_format"] = parsed_encoding_format
+    elif resource.type == "image":
+        # Image generation parameters
+        modelParameters = {}
+
+        parsed_size = (
+            kwargs.get("size", None)
+            if not isinstance(kwargs.get("size", None), NotGiven)
+            else None
+        )
+        if parsed_size is not None:
+            modelParameters["size"] = parsed_size
+
+        parsed_quality = (
+            kwargs.get("quality", None)
+            if not isinstance(kwargs.get("quality", None), NotGiven)
+            else None
+        )
+        if parsed_quality is not None:
+            modelParameters["quality"] = parsed_quality
+
+        parsed_style = (
+            kwargs.get("style", None)
+            if not isinstance(kwargs.get("style", None), NotGiven)
+            else None
+        )
+        if parsed_style is not None:
+            modelParameters["style"] = parsed_style
+
+        parsed_response_format = (
+            kwargs.get("response_format", None)
+            if not isinstance(kwargs.get("response_format", None), NotGiven)
+            else None
+        )
+        if parsed_response_format is not None:
+            modelParameters["response_format"] = parsed_response_format
+
+        if parsed_n is not None and isinstance(parsed_n, int) and parsed_n > 1:
+            modelParameters["n"] = parsed_n
     else:
         modelParameters = {
             "temperature": parsed_temperature,
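Each of the four image parameters above repeats the same NotGiven filter. If the authors want to tighten this, a small helper would do; a sketch, where _get_if_given is a hypothetical name that does not exist in the module:

# Hypothetical helper (not in this diff) collapsing the repeated NotGiven checks.
from openai import NotGiven

def _get_if_given(kwargs, key):
    value = kwargs.get(key, None)
    return None if isinstance(value, NotGiven) else value

# The image branch could then build modelParameters in one loop:
#     for key in ("size", "quality", "style", "response_format"):
#         value = _get_if_given(kwargs, key)
#         if value is not None:
#             modelParameters[key] = value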
@@ -791,6 +880,33 @@ def _get_langfuse_data_from_default_response(
             "count": len(data),
         }
 
+    elif resource.type == "image":
+        data = response.get("data", [])
+        completion = []
+        for item in data:
+            image_data = item.__dict__ if hasattr(item, "__dict__") else item
+            image_result = {}
+
+            # Handle URL response
+            if image_data.get("url"):
+                image_result["url"] = image_data["url"]
+
+            # Handle base64 response
+            if image_data.get("b64_json"):
+                # Wrap in LangfuseMedia for proper handling
+                base64_data_uri = f"data:image/png;base64,{image_data['b64_json']}"
+                image_result["image"] = LangfuseMedia(base64_data_uri=base64_data_uri)
+
+            # Include revised_prompt if present (DALL-E 3)
+            if image_data.get("revised_prompt"):
+                image_result["revised_prompt"] = image_data["revised_prompt"]
+
+            completion.append(image_result)
+
+        # If only one image, unwrap it from the list
+        if len(completion) == 1:
+            completion = completion[0]
+
     usage = _parse_usage(response.get("usage", None))
 
     return (model, completion, usage)
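For reference, the parsed completion for a single url-mode DALL-E 3 image ends up as a plain dict, while b64_json responses carry a LangfuseMedia object instead; the values below are invented purely for illustration.

# Illustrative shapes only; field values are made up.
completion_url_mode = {
    "url": "https://example.com/generated.png",
    "revised_prompt": "A photorealistic red fox standing in fresh snow",
}
# response_format="b64_json" instead yields the image wrapped for media upload:
# {"image": LangfuseMedia(base64_data_uri="data:image/png;base64,...")}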
@@ -842,6 +958,28 @@ def _wrap(
     try:
         openai_response = wrapped(**arg_extractor.get_openai_args())
 
+        # Handle image generation (non-streaming)
+        if open_ai_resource.type == "image":
+            model, completion, usage = _get_langfuse_data_from_default_response(
+                open_ai_resource,
+                (openai_response and openai_response.__dict__)
+                if _is_openai_v1()
+                else openai_response,
+            )
+
+            # Count generated images for usage tracking
+            image_count = 1
+            if isinstance(completion, list):
+                image_count = len(completion)
+
+            generation.update(
+                model=model,
+                output=completion,
+                usage_details={"output": image_count, "total": image_count, "unit": "IMAGES"},
+            ).end()
+
+            return openai_response
+
         if _is_streaming_response(openai_response):
             return LangfuseResponseGeneratorSync(
                 resource=open_ai_resource,
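The same image-handling block is repeated verbatim in _wrap_async below. A shared helper could keep the two paths in sync; a sketch, where _end_image_generation is a hypothetical name not present in the module:

# Hypothetical refactor (not in this diff): one helper used by both wrappers.
def _end_image_generation(generation, open_ai_resource, openai_response):
    model, completion, _usage = _get_langfuse_data_from_default_response(
        open_ai_resource,
        (openai_response and openai_response.__dict__)
        if _is_openai_v1()
        else openai_response,
    )
    image_count = len(completion) if isinstance(completion, list) else 1
    generation.update(
        model=model,
        output=completion,
        usage_details={"output": image_count, "total": image_count, "unit": "IMAGES"},
    ).end()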
@@ -913,6 +1051,28 @@ async def _wrap_async(
     try:
         openai_response = await wrapped(**arg_extractor.get_openai_args())
 
+        # Handle image generation (non-streaming)
+        if open_ai_resource.type == "image":
+            model, completion, usage = _get_langfuse_data_from_default_response(
+                open_ai_resource,
+                (openai_response and openai_response.__dict__)
+                if _is_openai_v1()
+                else openai_response,
+            )
+
+            # Count generated images for usage tracking
+            image_count = 1
+            if isinstance(completion, list):
+                image_count = len(completion)
+
+            generation.update(
+                model=model,
+                output=completion,
+                usage_details={"output": image_count, "total": image_count, "unit": "IMAGES"},
+            ).end()
+
+            return openai_response
+
         if _is_streaming_response(openai_response):
             return LangfuseResponseGeneratorAsync(
                 resource=open_ai_resource,
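End-to-end, a plain image call through the Langfuse drop-in client should now be traced with no further changes. A minimal usage sketch; the model, prompt, and described trace contents are examples, and the exact output depends on the SDK version.

# Minimal usage sketch. The drop-in import is the documented Langfuse pattern.
from langfuse.openai import OpenAI

client = OpenAI()
result = client.images.generate(
    model="dall-e-3",
    prompt="A watercolor lighthouse at dusk",
    size="1024x1024",
    response_format="url",
)
# Expected: an "OpenAI-image" generation with the prompt as input, the url and
# revised_prompt as output, and usage_details={"output": 1, "total": 1, "unit": "IMAGES"}.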