Diffstat (limited to '.venv/lib/python3.12/site-packages/openai/types')
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/__init__.py | 78
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/__init__.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py | 47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py | 113
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py | 11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py | 49
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py | 17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation.py | 10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py | 49
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py | 11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio_model.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio_response_format.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch.py | 87
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_create_params.py | 49
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_error.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_list_params.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py | 17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/__init__.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant.py | 134
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py | 212
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py | 294
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py | 11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py | 10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py | 171
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py | 3
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py | 55
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py | 96
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py | 27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py | 61
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py | 41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py | 62
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py | 32
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py | 67
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py | 68
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py | 36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py | 36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py | 34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py | 11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py | 87
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py | 52
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py | 91
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py | 27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py | 45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py | 45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py | 121
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py | 122
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py | 227
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py | 222
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py | 150
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py | 242
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py | 240
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py | 100
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py | 143
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py | 160
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py | 160
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread.py | 63
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py | 401
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py | 185
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py | 55
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py | 46
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py | 103
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py | 17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py | 55
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py | 17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py | 42
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py | 34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py | 245
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py | 261
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py | 17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py | 52
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py | 70
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py | 44
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py | 78
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py | 38
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py | 41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py | 115
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py | 56
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/__init__.py | 71
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py | 73
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py | 70
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py | 150
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py | 41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py | 79
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py | 8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py | 11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py | 57
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py | 25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py | 404
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py | 5
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py | 40
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat_model.py | 8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion.py | 37
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion_choice.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion_create_params.py | 187
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion_usage.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/embedding.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py | 53
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/embedding_model.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py | 13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_content.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_create_params.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_deleted.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_list_params.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_object.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_purpose.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py | 223
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py | 32
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py | 6
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py | 236
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py | 6
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py | 47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_edit_params.py | 62
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_generate_params.py | 65
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_model.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/images_response.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/model.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/model_deleted.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation.py | 186
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_model.py | 9
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py | 13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/__init__.py | 155
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py | 27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py | 44
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py | 45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py | 77
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response.py | 204
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py | 52
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py | 212
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py | 47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py | 208
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py | 204
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_error.py | 34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py | 43
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py | 41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py | 32
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py | 11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py | 9
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py | 131
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py | 10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py | 134
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_item.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py | 34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py | 34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py | 64
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py | 67
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py | 36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py | 36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_status.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py | 78
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py | 79
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py | 26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py | 27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py | 36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py | 18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py | 48
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py | 48
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/__init__.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/all_models.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/error_object.py | 17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py | 43
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py | 8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/metadata.py | 8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py | 28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py | 8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py | 48
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py | 53
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py | 30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py | 45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py | 10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py | 10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py | 10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py | 46
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py | 16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py | 22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/upload.py | 42
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/upload_create_params.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py | 6
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py | 14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py | 40
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py | 39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py | 13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py | 47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py | 45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py | 21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py | 67
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py | 15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py | 36
433 files changed, 17545 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/openai/types/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/__init__.py
new file mode 100644
index 00000000..11761534
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/__init__.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .batch import Batch as Batch
+from .image import Image as Image
+from .model import Model as Model
+from .shared import (
+ Metadata as Metadata,
+ AllModels as AllModels,
+ ChatModel as ChatModel,
+ Reasoning as Reasoning,
+ ErrorObject as ErrorObject,
+ CompoundFilter as CompoundFilter,
+ ResponsesModel as ResponsesModel,
+ ReasoningEffort as ReasoningEffort,
+ ComparisonFilter as ComparisonFilter,
+ FunctionDefinition as FunctionDefinition,
+ FunctionParameters as FunctionParameters,
+ ResponseFormatText as ResponseFormatText,
+ ResponseFormatJSONObject as ResponseFormatJSONObject,
+ ResponseFormatJSONSchema as ResponseFormatJSONSchema,
+)
+from .upload import Upload as Upload
+from .embedding import Embedding as Embedding
+from .chat_model import ChatModel as ChatModel
+from .completion import Completion as Completion
+from .moderation import Moderation as Moderation
+from .audio_model import AudioModel as AudioModel
+from .batch_error import BatchError as BatchError
+from .file_object import FileObject as FileObject
+from .image_model import ImageModel as ImageModel
+from .file_content import FileContent as FileContent
+from .file_deleted import FileDeleted as FileDeleted
+from .file_purpose import FilePurpose as FilePurpose
+from .vector_store import VectorStore as VectorStore
+from .model_deleted import ModelDeleted as ModelDeleted
+from .embedding_model import EmbeddingModel as EmbeddingModel
+from .images_response import ImagesResponse as ImagesResponse
+from .completion_usage import CompletionUsage as CompletionUsage
+from .file_list_params import FileListParams as FileListParams
+from .moderation_model import ModerationModel as ModerationModel
+from .batch_list_params import BatchListParams as BatchListParams
+from .completion_choice import CompletionChoice as CompletionChoice
+from .image_edit_params import ImageEditParams as ImageEditParams
+from .file_create_params import FileCreateParams as FileCreateParams
+from .batch_create_params import BatchCreateParams as BatchCreateParams
+from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
+from .upload_create_params import UploadCreateParams as UploadCreateParams
+from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
+from .audio_response_format import AudioResponseFormat as AudioResponseFormat
+from .image_generate_params import ImageGenerateParams as ImageGenerateParams
+from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
+from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
+from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
+from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
+from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
+from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
+from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
+from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
+from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
+from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
+from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
+from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
+from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
+from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
+from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
+from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
+from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
+from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
+from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
+from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
+from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject
+from .static_file_chunking_strategy_object_param import (
+ StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/audio/__init__.py
new file mode 100644
index 00000000..396944ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/__init__.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .translation import Translation as Translation
+from .speech_model import SpeechModel as SpeechModel
+from .transcription import Transcription as Transcription
+from .transcription_word import TranscriptionWord as TranscriptionWord
+from .translation_verbose import TranslationVerbose as TranslationVerbose
+from .speech_create_params import SpeechCreateParams as SpeechCreateParams
+from .transcription_include import TranscriptionInclude as TranscriptionInclude
+from .transcription_segment import TranscriptionSegment as TranscriptionSegment
+from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose
+from .translation_create_params import TranslationCreateParams as TranslationCreateParams
+from .transcription_stream_event import TranscriptionStreamEvent as TranscriptionStreamEvent
+from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams
+from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse
+from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse
+from .transcription_text_done_event import TranscriptionTextDoneEvent as TranscriptionTextDoneEvent
+from .transcription_text_delta_event import TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py
new file mode 100644
index 00000000..95868071
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from .speech_model import SpeechModel
+
+__all__ = ["SpeechCreateParams"]
+
+
+class SpeechCreateParams(TypedDict, total=False):
+ input: Required[str]
+ """The text to generate audio for. The maximum length is 4096 characters."""
+
+ model: Required[Union[str, SpeechModel]]
+ """
+ One of the available [TTS models](https://platform.openai.com/docs/models#tts):
+ `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+ """
+
+ voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]]
+ """The voice to use when generating the audio.
+
+ Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`,
+ `sage` and `shimmer`. Previews of the voices are available in the
+ [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+ """
+
+ instructions: str
+ """Control the voice of your generated audio with additional instructions.
+
+ Does not work with `tts-1` or `tts-1-hd`.
+ """
+
+ response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
+ """The format to audio in.
+
+ Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
+ """
+
+ speed: float
+ """The speed of the generated audio.
+
+ Select a value from `0.25` to `4.0`. `1.0` is the default.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py
new file mode 100644
index 00000000..f004f805
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["SpeechModel"]
+
+SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription.py
new file mode 100644
index 00000000..15763854
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["Transcription", "Logprob"]
+
+
+class Logprob(BaseModel):
+ token: Optional[str] = None
+ """The token in the transcription."""
+
+ bytes: Optional[List[float]] = None
+ """The bytes of the token."""
+
+ logprob: Optional[float] = None
+ """The log probability of the token."""
+
+
+class Transcription(BaseModel):
+ text: str
+ """The transcribed text."""
+
+ logprobs: Optional[List[Logprob]] = None
+ """The log probabilities of the tokens in the transcription.
+
+ Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`
+ if `logprobs` is added to the `include` array.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py
new file mode 100644
index 00000000..0cda4c79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py
@@ -0,0 +1,113 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import FileTypes
+from ..audio_model import AudioModel
+from .transcription_include import TranscriptionInclude
+from ..audio_response_format import AudioResponseFormat
+
+__all__ = [
+ "TranscriptionCreateParamsBase",
+ "TranscriptionCreateParamsNonStreaming",
+ "TranscriptionCreateParamsStreaming",
+]
+
+
+class TranscriptionCreateParamsBase(TypedDict, total=False):
+ file: Required[FileTypes]
+ """
+ The audio file object (not file name) to transcribe, in one of these formats:
+ flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ """
+
+ model: Required[Union[str, AudioModel]]
+ """ID of the model to use.
+
+ The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1`
+ (which is powered by our open source Whisper V2 model).
+ """
+
+ include: List[TranscriptionInclude]
+ """Additional information to include in the transcription response.
+
+ `logprobs` will return the log probabilities of the tokens in the response to
+ understand the model's confidence in the transcription. `logprobs` only works
+ with response_format set to `json` and only with the models `gpt-4o-transcribe`
+ and `gpt-4o-mini-transcribe`.
+ """
+
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ prompt: str
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
+ response_format: AudioResponseFormat
+ """
+ The format of the output, in one of these options: `json`, `text`, `srt`,
+ `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+ the only supported format is `json`.
+ """
+
+ temperature: float
+ """The sampling temperature, between 0 and 1.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. If set to 0, the model will use
+ [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+ automatically increase the temperature until certain thresholds are hit.
+ """
+
+ timestamp_granularities: List[Literal["word", "segment"]]
+ """The timestamp granularities to populate for this transcription.
+
+ `response_format` must be set to `verbose_json` to use timestamp granularities.
+ Either or both of these options are supported: `word` or `segment`. Note: There
+ is no additional latency for segment timestamps, but generating word timestamps
+ incurs additional latency.
+ """
+
+
+class TranscriptionCreateParamsNonStreaming(TranscriptionCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+ for more information.
+
+ Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+ """
+
+
+class TranscriptionCreateParamsStreaming(TranscriptionCreateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+ for more information.
+
+ Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+ """
+
+
+TranscriptionCreateParams = Union[TranscriptionCreateParamsNonStreaming, TranscriptionCreateParamsStreaming]
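
A minimal usage sketch for these params, assuming an `OpenAI` client with credentials configured and a local `speech.mp3` (the file name is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

with open("speech.mp3", "rb") as audio_file:  # placeholder file name
    transcription = client.audio.transcriptions.create(
        file=audio_file,
        model="gpt-4o-transcribe",
        response_format="json",  # required when asking for logprobs
        include=["logprobs"],
        language="en",
    )

print(transcription.text)
```
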
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py
new file mode 100644
index 00000000..2f7bed81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .transcription import Transcription
+from .transcription_verbose import TranscriptionVerbose
+
+__all__ = ["TranscriptionCreateResponse"]
+
+TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose]
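
Since the response type is a plain `Union`, callers narrow it at runtime; a sketch:

```python
from openai.types.audio import Transcription, TranscriptionVerbose

def text_of(resp: Transcription | TranscriptionVerbose) -> str:
    if isinstance(resp, TranscriptionVerbose):
        # verbose responses additionally carry duration, segments, and words
        return f"[{resp.duration:.1f}s] {resp.text}"
    return resp.text
```
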
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py
new file mode 100644
index 00000000..0e464ac9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["TranscriptionInclude"]
+
+TranscriptionInclude: TypeAlias = Literal["logprobs"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py
new file mode 100644
index 00000000..522c401e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionSegment"]
+
+
+class TranscriptionSegment(BaseModel):
+ id: int
+ """Unique identifier of the segment."""
+
+ avg_logprob: float
+ """Average logprob of the segment.
+
+ If the value is lower than -1, consider the logprobs failed.
+ """
+
+ compression_ratio: float
+ """Compression ratio of the segment.
+
+ If the value is greater than 2.4, consider the compression failed.
+ """
+
+ end: float
+ """End time of the segment in seconds."""
+
+ no_speech_prob: float
+ """Probability of no speech in the segment.
+
+ If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this
+ segment silent.
+ """
+
+ seek: int
+ """Seek offset of the segment."""
+
+ start: float
+ """Start time of the segment in seconds."""
+
+ temperature: float
+ """Temperature parameter used for generating the segment."""
+
+ text: str
+ """Text content of the segment."""
+
+ tokens: List[int]
+ """Array of token IDs for the text content."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py
new file mode 100644
index 00000000..757077a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .transcription_text_done_event import TranscriptionTextDoneEvent
+from .transcription_text_delta_event import TranscriptionTextDeltaEvent
+
+__all__ = ["TranscriptionStreamEvent"]
+
+TranscriptionStreamEvent: TypeAlias = Annotated[
+ Union[TranscriptionTextDeltaEvent, TranscriptionTextDoneEvent], PropertyInfo(discriminator="type")
+]
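
A sketch of consuming this discriminated union with `stream=True`; the client setup and file name are assumptions, and note the params docs above say streaming is ignored for `whisper-1`:

```python
from openai import OpenAI

client = OpenAI()

with open("speech.mp3", "rb") as audio_file:  # placeholder file name
    events = client.audio.transcriptions.create(
        file=audio_file,
        model="gpt-4o-mini-transcribe",
        stream=True,
    )
    for event in events:
        if event.type == "transcript.text.delta":
            print(event.delta, end="", flush=True)
        elif event.type == "transcript.text.done":
            print()  # event.text now holds the full transcript
```
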
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py
new file mode 100644
index 00000000..f8d53554
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionTextDeltaEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+ token: Optional[str] = None
+ """The token that was used to generate the log probability."""
+
+ bytes: Optional[List[object]] = None
+ """The bytes that were used to generate the log probability."""
+
+ logprob: Optional[float] = None
+ """The log probability of the token."""
+
+
+class TranscriptionTextDeltaEvent(BaseModel):
+ delta: str
+ """The text delta that was additionally transcribed."""
+
+ type: Literal["transcript.text.delta"]
+ """The type of the event. Always `transcript.text.delta`."""
+
+ logprobs: Optional[List[Logprob]] = None
+ """The log probabilities of the delta.
+
+ Only included if you
+ [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+ with the `include[]` parameter set to `logprobs`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py
new file mode 100644
index 00000000..3f1a713a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionTextDoneEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+ token: Optional[str] = None
+ """The token that was used to generate the log probability."""
+
+ bytes: Optional[List[object]] = None
+ """The bytes that were used to generate the log probability."""
+
+ logprob: Optional[float] = None
+ """The log probability of the token."""
+
+
+class TranscriptionTextDoneEvent(BaseModel):
+ text: str
+ """The text that was transcribed."""
+
+ type: Literal["transcript.text.done"]
+ """The type of the event. Always `transcript.text.done`."""
+
+ logprobs: Optional[List[Logprob]] = None
+ """The log probabilities of the individual tokens in the transcription.
+
+ Only included if you
+ [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+ with the `include[]` parameter set to `logprobs`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py
new file mode 100644
index 00000000..2a670189
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .transcription_word import TranscriptionWord
+from .transcription_segment import TranscriptionSegment
+
+__all__ = ["TranscriptionVerbose"]
+
+
+class TranscriptionVerbose(BaseModel):
+ duration: float
+ """The duration of the input audio."""
+
+ language: str
+ """The language of the input audio."""
+
+ text: str
+ """The transcribed text."""
+
+ segments: Optional[List[TranscriptionSegment]] = None
+ """Segments of the transcribed text and their corresponding details."""
+
+ words: Optional[List[TranscriptionWord]] = None
+ """Extracted words and their corresponding timestamps."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py
new file mode 100644
index 00000000..969da325
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionWord"]
+
+
+class TranscriptionWord(BaseModel):
+ end: float
+ """End time of the word in seconds."""
+
+ start: float
+ """Start time of the word in seconds."""
+
+ word: str
+ """The text content of the word."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation.py
new file mode 100644
index 00000000..7c0e9051
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["Translation"]
+
+
+class Translation(BaseModel):
+ text: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py
new file mode 100644
index 00000000..b23a1853
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import FileTypes
+from ..audio_model import AudioModel
+
+__all__ = ["TranslationCreateParams"]
+
+
+class TranslationCreateParams(TypedDict, total=False):
+ file: Required[FileTypes]
+ """
+ The audio file object (not file name) to translate, in one of these formats: flac,
+ mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ """
+
+ model: Required[Union[str, AudioModel]]
+ """ID of the model to use.
+
+ Only `whisper-1` (which is powered by our open source Whisper V2 model) is
+ currently available.
+ """
+
+ prompt: str
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should be in English.
+ """
+
+ response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
+ """
+ The format of the output, in one of these options: `json`, `text`, `srt`,
+ `verbose_json`, or `vtt`.
+ """
+
+ temperature: float
+ """The sampling temperature, between 0 and 1.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. If set to 0, the model will use
+ [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+ automatically increase the temperature until certain thresholds are hit.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py
new file mode 100644
index 00000000..9953813c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .translation import Translation
+from .translation_verbose import TranslationVerbose
+
+__all__ = ["TranslationCreateResponse"]
+
+TranslationCreateResponse: TypeAlias = Union[Translation, TranslationVerbose]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py
new file mode 100644
index 00000000..27cb02d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .transcription_segment import TranscriptionSegment
+
+__all__ = ["TranslationVerbose"]
+
+
+class TranslationVerbose(BaseModel):
+ duration: float
+ """The duration of the input audio."""
+
+ language: str
+ """The language of the output translation (always `english`)."""
+
+ text: str
+ """The translated text."""
+
+ segments: Optional[List[TranscriptionSegment]] = None
+ """Segments of the translated text and their corresponding details."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio_model.py b/.venv/lib/python3.12/site-packages/openai/types/audio_model.py
new file mode 100644
index 00000000..4d14d601
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["AudioModel"]
+
+AudioModel: TypeAlias = Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio_response_format.py b/.venv/lib/python3.12/site-packages/openai/types/audio_response_format.py
new file mode 100644
index 00000000..f8c8d459
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio_response_format.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["AudioResponseFormat"]
+
+AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py b/.venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py
new file mode 100644
index 00000000..6f17836b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["AutoFileChunkingStrategyParam"]
+
+
+class AutoFileChunkingStrategyParam(TypedDict, total=False):
+ type: Required[Literal["auto"]]
+ """Always `auto`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch.py b/.venv/lib/python3.12/site-packages/openai/types/batch.py
new file mode 100644
index 00000000..35de90ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch.py
@@ -0,0 +1,87 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .batch_error import BatchError
+from .shared.metadata import Metadata
+from .batch_request_counts import BatchRequestCounts
+
+__all__ = ["Batch", "Errors"]
+
+
+class Errors(BaseModel):
+ data: Optional[List[BatchError]] = None
+
+ object: Optional[str] = None
+ """The object type, which is always `list`."""
+
+
+class Batch(BaseModel):
+ id: str
+
+ completion_window: str
+ """The time frame within which the batch should be processed."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the batch was created."""
+
+ endpoint: str
+ """The OpenAI API endpoint used by the batch."""
+
+ input_file_id: str
+ """The ID of the input file for the batch."""
+
+ object: Literal["batch"]
+ """The object type, which is always `batch`."""
+
+ status: Literal[
+ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
+ ]
+ """The current status of the batch."""
+
+ cancelled_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was cancelled."""
+
+ cancelling_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started cancelling."""
+
+ completed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was completed."""
+
+ error_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of requests with errors."""
+
+ errors: Optional[Errors] = None
+
+ expired_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch expired."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch will expire."""
+
+ failed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch failed."""
+
+ finalizing_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started finalizing."""
+
+ in_progress_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started processing."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ output_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of successfully executed requests."""
+
+ request_counts: Optional[BatchRequestCounts] = None
+ """The request counts for different statuses within the batch."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/batch_create_params.py
new file mode 100644
index 00000000..cc95afd3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_create_params.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .shared_params.metadata import Metadata
+
+__all__ = ["BatchCreateParams"]
+
+
+class BatchCreateParams(TypedDict, total=False):
+ completion_window: Required[Literal["24h"]]
+ """The time frame within which the batch should be processed.
+
+ Currently only `24h` is supported.
+ """
+
+ endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
+ """The endpoint to be used for all requests in the batch.
+
+ Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
+ `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
+ restricted to a maximum of 50,000 embedding inputs across all requests in the
+ batch.
+ """
+
+ input_file_id: Required[str]
+ """The ID of an uploaded file that contains requests for the new batch.
+
+ See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+ for how to upload a file.
+
+ Your input file must be formatted as a
+ [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
+ and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+ requests, and can be up to 200 MB in size.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_error.py b/.venv/lib/python3.12/site-packages/openai/types/batch_error.py
new file mode 100644
index 00000000..1cdd808d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_error.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["BatchError"]
+
+
+class BatchError(BaseModel):
+ code: Optional[str] = None
+ """An error code identifying the error type."""
+
+ line: Optional[int] = None
+ """The line number of the input file where the error occurred, if applicable."""
+
+ message: Optional[str] = None
+ """A human-readable message providing more details about the error."""
+
+ param: Optional[str] = None
+ """The name of the parameter that caused the error, if applicable."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/batch_list_params.py
new file mode 100644
index 00000000..ef5e966b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["BatchListParams"]
+
+
+class BatchListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py b/.venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py
new file mode 100644
index 00000000..7e1d49fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["BatchRequestCounts"]
+
+
+class BatchRequestCounts(BaseModel):
+ completed: int
+ """Number of requests that have been completed successfully."""
+
+ failed: int
+ """Number of requests that have failed."""
+
+ total: int
+ """Total number of requests in the batch."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/__init__.py
new file mode 100644
index 00000000..5ba3eadf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .thread import Thread as Thread
+from .assistant import Assistant as Assistant
+from .function_tool import FunctionTool as FunctionTool
+from .assistant_tool import AssistantTool as AssistantTool
+from .thread_deleted import ThreadDeleted as ThreadDeleted
+from .file_search_tool import FileSearchTool as FileSearchTool
+from .assistant_deleted import AssistantDeleted as AssistantDeleted
+from .function_tool_param import FunctionToolParam as FunctionToolParam
+from .assistant_tool_param import AssistantToolParam as AssistantToolParam
+from .thread_create_params import ThreadCreateParams as ThreadCreateParams
+from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams
+from .assistant_list_params import AssistantListParams as AssistantListParams
+from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice
+from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool
+from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
+from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
+from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
+from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
+from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
+from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
+from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
+from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
+from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
+from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
+from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam
+from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam
+from .assistant_response_format_option_param import (
+ AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant.py
new file mode 100644
index 00000000..58421e0f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant.py
@@ -0,0 +1,134 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .assistant_tool import AssistantTool
+from ..shared.metadata import Metadata
+from .assistant_response_format_option import AssistantResponseFormatOption
+
+__all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ToolResourcesCodeInterpreter(BaseModel):
+ file_ids: Optional[List[str]] = None
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(BaseModel):
+ vector_store_ids: Optional[List[str]] = None
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+
+class ToolResources(BaseModel):
+ code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+
+ file_search: Optional[ToolResourcesFileSearch] = None
+
+
+class Assistant(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the assistant was created."""
+
+ description: Optional[str] = None
+ """The description of the assistant. The maximum length is 512 characters."""
+
+ instructions: Optional[str] = None
+ """The system instructions that the assistant uses.
+
+ The maximum length is 256,000 characters.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: str
+ """ID of the model to use.
+
+ You can use the
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ see all of your available models, or see our
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
+ """
+
+ name: Optional[str] = None
+ """The name of the assistant. The maximum length is 256 characters."""
+
+ object: Literal["assistant"]
+ """The object type, which is always `assistant`."""
+
+ tools: List[AssistantTool]
+ """A list of tool enabled on the assistant.
+
+ There can be a maximum of 128 tools per assistant. Tools can be of types
+ `code_interpreter`, `file_search`, or `function`.
+ """
+
+ response_format: Optional[AssistantResponseFormatOption] = None
+ """Specifies the format that the model must output.
+
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
+ in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ temperature: Optional[float] = None
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+ """
+
+ tool_resources: Optional[ToolResources] = None
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+ top_p: Optional[float] = None
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py
new file mode 100644
index 00000000..8b3c3318
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py
@@ -0,0 +1,212 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared.chat_model import ChatModel
+from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = [
+ "AssistantCreateParams",
+ "ToolResources",
+ "ToolResourcesCodeInterpreter",
+ "ToolResourcesFileSearch",
+ "ToolResourcesFileSearchVectorStore",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategy",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
+]
+
+
+class AssistantCreateParams(TypedDict, total=False):
+ model: Required[Union[str, ChatModel]]
+ """ID of the model to use.
+
+ You can use the
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ see all of your available models, or see our
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
+ """
+
+ description: Optional[str]
+ """The description of the assistant. The maximum length is 512 characters."""
+
+ instructions: Optional[str]
+ """The system instructions that the assistant uses.
+
+ The maximum length is 256,000 characters.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ name: Optional[str]
+ """The name of the assistant. The maximum length is 256 characters."""
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """**o-series models only**
+
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ result in faster responses and fewer tokens used on reasoning in a response.
+ """
+
+ response_format: Optional[AssistantResponseFormatOptionParam]
+ """Specifies the format that the model must output.
+
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
+ in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+ """
+
+ tool_resources: Optional[ToolResources]
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+ tools: Iterable[AssistantToolParam]
+ """A list of tool enabled on the assistant.
+
+ There can be a maximum of 128 tools per assistant. Tools can be of types
+ `code_interpreter`, `file_search`, or `function`.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+ type: Required[Literal["auto"]]
+ """Always `auto`."""
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+ chunk_overlap_tokens: Required[int]
+ """The number of tokens that overlap between chunks. The default value is `400`.
+
+ Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+ """
+
+ max_chunk_size_tokens: Required[int]
+ """The maximum number of tokens in each chunk.
+
+ The default value is `800`. The minimum value is `100` and the maximum value is
+ `4096`.
+ """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+ static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+ type: Required[Literal["static"]]
+ """Always `static`."""
+
+
+ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+ ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
+]
+
+
+class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
+ chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
+ """The chunking strategy used to chunk the file(s).
+
+ If not set, will use the `auto` strategy.
+ """
+
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ add to the vector store. There can be a maximum of 10000 files in a vector
+ store.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+ vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
+ """
+ A helper to create a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ with file_ids and attach it to this assistant. There can be a maximum of 1
+ vector store attached to the assistant.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
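
A sketch of an Assistants-beta create call exercising the nested `tool_resources` shape above (all IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

assistant = client.beta.assistants.create(
    model="gpt-4o",
    name="Docs helper",
    instructions="Answer questions using the attached files.",
    tools=[{"type": "file_search"}],
    tool_resources={
        "file_search": {
            "vector_stores": [
                {
                    "file_ids": ["file-abc123"],  # placeholder ID
                    "chunking_strategy": {
                        "type": "static",
                        "static": {
                            "max_chunk_size_tokens": 800,  # documented default
                            "chunk_overlap_tokens": 400,   # documented default
                        },
                    },
                }
            ]
        }
    },
)
print(assistant.id)
```
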
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py
new file mode 100644
index 00000000..3be40cd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["AssistantDeleted"]
+
+
+class AssistantDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["assistant.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py
new file mode 100644
index 00000000..834ffbca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["AssistantListParams"]
+
+
+class AssistantListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py
new file mode 100644
index 00000000..6f06a344
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from ..shared.response_format_text import ResponseFormatText
+from ..shared.response_format_json_object import ResponseFormatJSONObject
+from ..shared.response_format_json_schema import ResponseFormatJSONSchema
+
+__all__ = ["AssistantResponseFormatOption"]
+
+AssistantResponseFormatOption: TypeAlias = Union[
+ Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py
new file mode 100644
index 00000000..5e724a4d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from ..shared_params.response_format_text import ResponseFormatText
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
+
+__all__ = ["AssistantResponseFormatOptionParam"]
+
+AssistantResponseFormatOptionParam: TypeAlias = Union[
+ Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py
new file mode 100644
index 00000000..41d3a0c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py
@@ -0,0 +1,294 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .thread import Thread
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .threads.run import Run
+from .threads.message import Message
+from ..shared.error_object import ErrorObject
+from .threads.runs.run_step import RunStep
+from .threads.message_delta_event import MessageDeltaEvent
+from .threads.runs.run_step_delta_event import RunStepDeltaEvent
+
+__all__ = [
+ "AssistantStreamEvent",
+ "ThreadCreated",
+ "ThreadRunCreated",
+ "ThreadRunQueued",
+ "ThreadRunInProgress",
+ "ThreadRunRequiresAction",
+ "ThreadRunCompleted",
+ "ThreadRunIncomplete",
+ "ThreadRunFailed",
+ "ThreadRunCancelling",
+ "ThreadRunCancelled",
+ "ThreadRunExpired",
+ "ThreadRunStepCreated",
+ "ThreadRunStepInProgress",
+ "ThreadRunStepDelta",
+ "ThreadRunStepCompleted",
+ "ThreadRunStepFailed",
+ "ThreadRunStepCancelled",
+ "ThreadRunStepExpired",
+ "ThreadMessageCreated",
+ "ThreadMessageInProgress",
+ "ThreadMessageDelta",
+ "ThreadMessageCompleted",
+ "ThreadMessageIncomplete",
+ "ErrorEvent",
+]
+
+
+class ThreadCreated(BaseModel):
+ data: Thread
+ """
+ Represents a thread that contains
+ [messages](https://platform.openai.com/docs/api-reference/messages).
+ """
+
+ event: Literal["thread.created"]
+
+ enabled: Optional[bool] = None
+ """Whether to enable input audio transcription."""
+
+
+class ThreadRunCreated(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.created"]
+
+
+class ThreadRunQueued(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.queued"]
+
+
+class ThreadRunInProgress(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.in_progress"]
+
+
+class ThreadRunRequiresAction(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.requires_action"]
+
+
+class ThreadRunCompleted(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.completed"]
+
+
+class ThreadRunIncomplete(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.incomplete"]
+
+
+class ThreadRunFailed(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.failed"]
+
+
+class ThreadRunCancelling(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.cancelling"]
+
+
+class ThreadRunCancelled(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.cancelled"]
+
+
+class ThreadRunExpired(BaseModel):
+ data: Run
+ """
+ Represents an execution run on a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.run.expired"]
+
+
+class ThreadRunStepCreated(BaseModel):
+ data: RunStep
+ """Represents a step in execution of a run."""
+
+ event: Literal["thread.run.step.created"]
+
+
+class ThreadRunStepInProgress(BaseModel):
+ data: RunStep
+ """Represents a step in execution of a run."""
+
+ event: Literal["thread.run.step.in_progress"]
+
+
+class ThreadRunStepDelta(BaseModel):
+ data: RunStepDeltaEvent
+ """Represents a run step delta i.e.
+
+ any changed fields on a run step during streaming.
+ """
+
+ event: Literal["thread.run.step.delta"]
+
+
+class ThreadRunStepCompleted(BaseModel):
+ data: RunStep
+ """Represents a step in execution of a run."""
+
+ event: Literal["thread.run.step.completed"]
+
+
+class ThreadRunStepFailed(BaseModel):
+ data: RunStep
+ """Represents a step in execution of a run."""
+
+ event: Literal["thread.run.step.failed"]
+
+
+class ThreadRunStepCancelled(BaseModel):
+ data: RunStep
+ """Represents a step in execution of a run."""
+
+ event: Literal["thread.run.step.cancelled"]
+
+
+class ThreadRunStepExpired(BaseModel):
+ data: RunStep
+ """Represents a step in execution of a run."""
+
+ event: Literal["thread.run.step.expired"]
+
+
+class ThreadMessageCreated(BaseModel):
+ data: Message
+ """
+ Represents a message within a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.message.created"]
+
+
+class ThreadMessageInProgress(BaseModel):
+ data: Message
+ """
+ Represents a message within a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.message.in_progress"]
+
+
+class ThreadMessageDelta(BaseModel):
+ data: MessageDeltaEvent
+ """Represents a message delta i.e.
+
+ any changed fields on a message during streaming.
+ """
+
+ event: Literal["thread.message.delta"]
+
+
+class ThreadMessageCompleted(BaseModel):
+ data: Message
+ """
+ Represents a message within a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.message.completed"]
+
+
+class ThreadMessageIncomplete(BaseModel):
+ data: Message
+ """
+ Represents a message within a
+ [thread](https://platform.openai.com/docs/api-reference/threads).
+ """
+
+ event: Literal["thread.message.incomplete"]
+
+
+class ErrorEvent(BaseModel):
+ data: ErrorObject
+
+ event: Literal["error"]
+
+
+AssistantStreamEvent: TypeAlias = Annotated[
+ Union[
+ ThreadCreated,
+ ThreadRunCreated,
+ ThreadRunQueued,
+ ThreadRunInProgress,
+ ThreadRunRequiresAction,
+ ThreadRunCompleted,
+ ThreadRunIncomplete,
+ ThreadRunFailed,
+ ThreadRunCancelling,
+ ThreadRunCancelled,
+ ThreadRunExpired,
+ ThreadRunStepCreated,
+ ThreadRunStepInProgress,
+ ThreadRunStepDelta,
+ ThreadRunStepCompleted,
+ ThreadRunStepFailed,
+ ThreadRunStepCancelled,
+ ThreadRunStepExpired,
+ ThreadMessageCreated,
+ ThreadMessageInProgress,
+ ThreadMessageDelta,
+ ThreadMessageCompleted,
+ ThreadMessageIncomplete,
+ ErrorEvent,
+ ],
+ PropertyInfo(discriminator="event"),
+]
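
A sketch of dispatching on the `event` discriminator while streaming a run; the thread and assistant IDs are placeholders, and the delta handling assumes text content blocks:

```python
from openai import OpenAI

client = OpenAI()

events = client.beta.threads.runs.create(
    thread_id="thread_abc123",   # placeholder ID
    assistant_id="asst_abc123",  # placeholder ID
    stream=True,
)
for event in events:
    if event.event == "thread.message.delta":
        for block in event.data.delta.content or []:
            if block.type == "text" and block.text and block.text.value:
                print(block.text.value, end="", flush=True)
    elif event.event == "thread.run.completed":
        print()
    elif event.event == "error":
        raise RuntimeError(event.data.message)
```
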
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py
new file mode 100644
index 00000000..1bde6858
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .function_tool import FunctionTool
+from .file_search_tool import FileSearchTool
+from .code_interpreter_tool import CodeInterpreterTool
+
+__all__ = ["AssistantTool"]
+
+AssistantTool: TypeAlias = Annotated[
+ Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py
new file mode 100644
index 00000000..d73439f0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .assistant_tool_choice_function import AssistantToolChoiceFunction
+
+__all__ = ["AssistantToolChoice"]
+
+
+class AssistantToolChoice(BaseModel):
+ type: Literal["function", "code_interpreter", "file_search"]
+ """The type of the tool. If type is `function`, the function name must be set"""
+
+ function: Optional[AssistantToolChoiceFunction] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py
new file mode 100644
index 00000000..0c896d80
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["AssistantToolChoiceFunction"]
+
+
+class AssistantToolChoiceFunction(BaseModel):
+ name: str
+ """The name of the function to call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py
new file mode 100644
index 00000000..428857de
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["AssistantToolChoiceFunctionParam"]
+
+
+class AssistantToolChoiceFunctionParam(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py
new file mode 100644
index 00000000..e57c3278
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .assistant_tool_choice import AssistantToolChoice
+
+__all__ = ["AssistantToolChoiceOption"]
+
+AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py
new file mode 100644
index 00000000..cc0053d3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .assistant_tool_choice_param import AssistantToolChoiceParam
+
+__all__ = ["AssistantToolChoiceOptionParam"]
+
+AssistantToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam]
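
The union admits either a bare literal or a pinned tool; a sketch of forcing one function on a run (the IDs and function name are placeholders):

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.create(
    thread_id="thread_abc123",   # placeholder ID
    assistant_id="asst_abc123",  # placeholder ID
    # "none" / "auto" / "required" are the literal options; a dict pins a tool:
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
)
print(run.status)
```
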
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py
new file mode 100644
index 00000000..904f489e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam
+
+__all__ = ["AssistantToolChoiceParam"]
+
+
+class AssistantToolChoiceParam(TypedDict, total=False):
+ type: Required[Literal["function", "code_interpreter", "file_search"]]
+ """The type of the tool. If type is `function`, the function name must be set"""
+
+ function: AssistantToolChoiceFunctionParam
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py
new file mode 100644
index 00000000..321c4b1d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .function_tool_param import FunctionToolParam
+from .file_search_tool_param import FileSearchToolParam
+from .code_interpreter_tool_param import CodeInterpreterToolParam
+
+__all__ = ["AssistantToolParam"]
+
+AssistantToolParam: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py
new file mode 100644
index 00000000..d3ec7614
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py
@@ -0,0 +1,171 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, TypedDict
+
+from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class AssistantUpdateParams(TypedDict, total=False):
+ description: Optional[str]
+ """The description of the assistant. The maximum length is 512 characters."""
+
+ instructions: Optional[str]
+ """The system instructions that the assistant uses.
+
+ The maximum length is 256,000 characters.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: Union[
+ str,
+ Literal[
+ "o3-mini",
+ "o3-mini-2025-01-31",
+ "o1",
+ "o1-2024-12-17",
+ "gpt-4o",
+ "gpt-4o-2024-11-20",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-mini",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4.5-preview",
+ "gpt-4.5-preview-2025-02-27",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-0125-preview",
+ "gpt-4-turbo-preview",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-16k-0613",
+ ],
+ ]
+ """ID of the model to use.
+
+ You can use the
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ see all of your available models, or see our
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
+ """
+
+ name: Optional[str]
+ """The name of the assistant. The maximum length is 256 characters."""
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """**o-series models only**
+
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ result in faster responses and fewer tokens used on reasoning in a response.
+ """
+
+ response_format: Optional[AssistantResponseFormatOptionParam]
+ """Specifies the format that the model must output.
+
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
+ in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+ """
+
+ tool_resources: Optional[ToolResources]
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+ tools: Iterable[AssistantToolParam]
+ """A list of tool enabled on the assistant.
+
+ There can be a maximum of 128 tools per assistant. Tools can be of types
+ `code_interpreter`, `file_search`, or `function`.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ Overrides the list of
+ [file](https://platform.openai.com/docs/api-reference/files) IDs made available
+ to the `code_interpreter` tool. There can be a maximum of 20 files associated
+ with the tool.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ Overrides the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
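
A hedged usage sketch of these params, assuming the client's standard `beta.assistants.update` method; the assistant and vector store IDs are placeholders:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    assistant = client.beta.assistants.update(
        "asst_abc123",  # placeholder assistant ID
        model="gpt-4o",
        temperature=0.2,
        tools=[{"type": "file_search"}],
        tool_resources={"file_search": {"vector_store_ids": ["vs_abc123"]}},
    )
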
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py
new file mode 100644
index 00000000..17ab3de6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["CodeInterpreterTool"]
+
+
+class CodeInterpreterTool(BaseModel):
+ type: Literal["code_interpreter"]
+ """The type of tool being defined: `code_interpreter`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py
new file mode 100644
index 00000000..4f6916d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["CodeInterpreterToolParam"]
+
+
+class CodeInterpreterToolParam(TypedDict, total=False):
+ type: Required[Literal["code_interpreter"]]
+ """The type of tool being defined: `code_interpreter`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py
new file mode 100644
index 00000000..89fc16c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileSearchTool", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(BaseModel):
+ score_threshold: float
+ """The score threshold for the file search.
+
+    The value must be a floating point number between 0 and 1.
+ """
+
+ ranker: Optional[Literal["auto", "default_2024_08_21"]] = None
+ """The ranker to use for the file search.
+
+    If not specified, the `auto` ranker will be used.
+ """
+
+
+class FileSearch(BaseModel):
+ max_num_results: Optional[int] = None
+ """The maximum number of results the file search tool should output.
+
+ The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+ should be between 1 and 50 inclusive.
+
+ Note that the file search tool may output fewer than `max_num_results` results.
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ for more information.
+ """
+
+ ranking_options: Optional[FileSearchRankingOptions] = None
+ """The ranking options for the file search.
+
+ If not specified, the file search tool will use the `auto` ranker and a
+ score_threshold of 0.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ for more information.
+ """
+
+
+class FileSearchTool(BaseModel):
+ type: Literal["file_search"]
+ """The type of tool being defined: `file_search`"""
+
+ file_search: Optional[FileSearch] = None
+ """Overrides for the file search tool."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py
new file mode 100644
index 00000000..c73d0af7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FileSearchToolParam", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(TypedDict, total=False):
+ score_threshold: Required[float]
+ """The score threshold for the file search.
+
+    The value must be a floating point number between 0 and 1.
+ """
+
+ ranker: Literal["auto", "default_2024_08_21"]
+ """The ranker to use for the file search.
+
+    If not specified, the `auto` ranker will be used.
+ """
+
+
+class FileSearch(TypedDict, total=False):
+ max_num_results: int
+ """The maximum number of results the file search tool should output.
+
+ The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+ should be between 1 and 50 inclusive.
+
+ Note that the file search tool may output fewer than `max_num_results` results.
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ for more information.
+ """
+
+ ranking_options: FileSearchRankingOptions
+ """The ranking options for the file search.
+
+ If not specified, the file search tool will use the `auto` ranker and a
+ score_threshold of 0.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ for more information.
+ """
+
+
+class FileSearchToolParam(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """The type of tool being defined: `file_search`"""
+
+ file_search: FileSearch
+ """Overrides for the file search tool."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py
new file mode 100644
index 00000000..f9227678
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.function_definition import FunctionDefinition
+
+__all__ = ["FunctionTool"]
+
+
+class FunctionTool(BaseModel):
+ function: FunctionDefinition
+
+ type: Literal["function"]
+ """The type of tool being defined: `function`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py
new file mode 100644
index 00000000..d906e02b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.function_definition import FunctionDefinition
+
+__all__ = ["FunctionToolParam"]
+
+
+class FunctionToolParam(TypedDict, total=False):
+ function: Required[FunctionDefinition]
+
+ type: Required[Literal["function"]]
+ """The type of tool being defined: `function`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py
new file mode 100644
index 00000000..0374b9b4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py
@@ -0,0 +1,96 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .session import Session as Session
+from .error_event import ErrorEvent as ErrorEvent
+from .conversation_item import ConversationItem as ConversationItem
+from .realtime_response import RealtimeResponse as RealtimeResponse
+from .response_done_event import ResponseDoneEvent as ResponseDoneEvent
+from .session_update_event import SessionUpdateEvent as SessionUpdateEvent
+from .realtime_client_event import RealtimeClientEvent as RealtimeClientEvent
+from .realtime_server_event import RealtimeServerEvent as RealtimeServerEvent
+from .response_cancel_event import ResponseCancelEvent as ResponseCancelEvent
+from .response_create_event import ResponseCreateEvent as ResponseCreateEvent
+from .session_create_params import SessionCreateParams as SessionCreateParams
+from .session_created_event import SessionCreatedEvent as SessionCreatedEvent
+from .session_updated_event import SessionUpdatedEvent as SessionUpdatedEvent
+from .transcription_session import TranscriptionSession as TranscriptionSession
+from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent
+from .conversation_item_param import ConversationItemParam as ConversationItemParam
+from .realtime_connect_params import RealtimeConnectParams as RealtimeConnectParams
+from .realtime_response_usage import RealtimeResponseUsage as RealtimeResponseUsage
+from .session_create_response import SessionCreateResponse as SessionCreateResponse
+from .realtime_response_status import RealtimeResponseStatus as RealtimeResponseStatus
+from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent
+from .conversation_item_content import ConversationItemContent as ConversationItemContent
+from .rate_limits_updated_event import RateLimitsUpdatedEvent as RateLimitsUpdatedEvent
+from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent
+from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent
+from .conversation_created_event import ConversationCreatedEvent as ConversationCreatedEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
+from .session_update_event_param import SessionUpdateEventParam as SessionUpdateEventParam
+from .realtime_client_event_param import RealtimeClientEventParam as RealtimeClientEventParam
+from .response_cancel_event_param import ResponseCancelEventParam as ResponseCancelEventParam
+from .response_create_event_param import ResponseCreateEventParam as ResponseCreateEventParam
+from .transcription_session_update import TranscriptionSessionUpdate as TranscriptionSessionUpdate
+from .conversation_item_create_event import ConversationItemCreateEvent as ConversationItemCreateEvent
+from .conversation_item_delete_event import ConversationItemDeleteEvent as ConversationItemDeleteEvent
+from .input_audio_buffer_clear_event import InputAudioBufferClearEvent as InputAudioBufferClearEvent
+from .conversation_item_content_param import ConversationItemContentParam as ConversationItemContentParam
+from .conversation_item_created_event import ConversationItemCreatedEvent as ConversationItemCreatedEvent
+from .conversation_item_deleted_event import ConversationItemDeletedEvent as ConversationItemDeletedEvent
+from .input_audio_buffer_append_event import InputAudioBufferAppendEvent as InputAudioBufferAppendEvent
+from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent as InputAudioBufferCommitEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
+from .conversation_item_retrieve_event import ConversationItemRetrieveEvent as ConversationItemRetrieveEvent
+from .conversation_item_truncate_event import ConversationItemTruncateEvent as ConversationItemTruncateEvent
+from .conversation_item_with_reference import ConversationItemWithReference as ConversationItemWithReference
+from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent as InputAudioBufferClearedEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
+from .conversation_item_truncated_event import ConversationItemTruncatedEvent as ConversationItemTruncatedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
+from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent
+from .transcription_session_update_param import TranscriptionSessionUpdateParam as TranscriptionSessionUpdateParam
+from .transcription_session_create_params import TranscriptionSessionCreateParams as TranscriptionSessionCreateParams
+from .transcription_session_updated_event import TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent
+from .conversation_item_create_event_param import ConversationItemCreateEventParam as ConversationItemCreateEventParam
+from .conversation_item_delete_event_param import ConversationItemDeleteEventParam as ConversationItemDeleteEventParam
+from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam as InputAudioBufferClearEventParam
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent
+from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam as InputAudioBufferAppendEventParam
+from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam as InputAudioBufferCommitEventParam
+from .response_audio_transcript_delta_event import (
+ ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
+)
+from .conversation_item_retrieve_event_param import (
+ ConversationItemRetrieveEventParam as ConversationItemRetrieveEventParam,
+)
+from .conversation_item_truncate_event_param import (
+ ConversationItemTruncateEventParam as ConversationItemTruncateEventParam,
+)
+from .conversation_item_with_reference_param import (
+ ConversationItemWithReferenceParam as ConversationItemWithReferenceParam,
+)
+from .input_audio_buffer_speech_started_event import (
+ InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent,
+)
+from .input_audio_buffer_speech_stopped_event import (
+ InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent,
+)
+from .response_function_call_arguments_done_event import (
+ ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
+)
+from .response_function_call_arguments_delta_event import (
+ ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
+)
+from .conversation_item_input_audio_transcription_delta_event import (
+ ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent,
+)
+from .conversation_item_input_audio_transcription_failed_event import (
+ ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent,
+)
+from .conversation_item_input_audio_transcription_completed_event import (
+ ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py
new file mode 100644
index 00000000..4ba05408
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationCreatedEvent", "Conversation"]
+
+
+class Conversation(BaseModel):
+ id: Optional[str] = None
+ """The unique ID of the conversation."""
+
+ object: Optional[Literal["realtime.conversation"]] = None
+ """The object type, must be `realtime.conversation`."""
+
+
+class ConversationCreatedEvent(BaseModel):
+ conversation: Conversation
+ """The conversation resource."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ type: Literal["conversation.created"]
+ """The event type, must be `conversation.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py
new file mode 100644
index 00000000..4edf6c4d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item_content import ConversationItemContent
+
+__all__ = ["ConversationItem"]
+
+
+class ConversationItem(BaseModel):
+ id: Optional[str] = None
+ """
+    The unique ID of the item. This can be generated by the client to help manage
+    server-side context, but is not required because the server will generate one if
+    not provided.
+ """
+
+ arguments: Optional[str] = None
+ """The arguments of the function call (for `function_call` items)."""
+
+ call_id: Optional[str] = None
+ """
+ The ID of the function call (for `function_call` and `function_call_output`
+ items). If passed on a `function_call_output` item, the server will check that a
+ `function_call` item with the same ID exists in the conversation history.
+ """
+
+ content: Optional[List[ConversationItemContent]] = None
+ """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content.
+    - Message items of role `user` support `input_text` and `input_audio` content.
+ - Message items of role `assistant` support `text` content.
+ """
+
+ name: Optional[str] = None
+ """The name of the function being called (for `function_call` items)."""
+
+ object: Optional[Literal["realtime.item"]] = None
+ """Identifier for the API object being returned - always `realtime.item`."""
+
+ output: Optional[str] = None
+ """The output of the function call (for `function_call_output` items)."""
+
+ role: Optional[Literal["user", "assistant", "system"]] = None
+ """
+ The role of the message sender (`user`, `assistant`, `system`), only applicable
+ for `message` items.
+ """
+
+ status: Optional[Literal["completed", "incomplete"]] = None
+ """The status of the item (`completed`, `incomplete`).
+
+ These have no effect on the conversation, but are accepted for consistency with
+ the `conversation.item.created` event.
+ """
+
+ type: Optional[Literal["message", "function_call", "function_call_output"]] = None
+ """The type of the item (`message`, `function_call`, `function_call_output`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py
new file mode 100644
index 00000000..ab40a4a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemContent"]
+
+
+class ConversationItemContent(BaseModel):
+ id: Optional[str] = None
+ """
+ ID of a previous conversation item to reference (for `item_reference` content
+ types in `response.create` events). These can reference both client and server
+ created items.
+ """
+
+ audio: Optional[str] = None
+ """Base64-encoded audio bytes, used for `input_audio` content type."""
+
+ text: Optional[str] = None
+ """The text content, used for `input_text` and `text` content types."""
+
+ transcript: Optional[str] = None
+ """The transcript of the audio, used for `input_audio` content type."""
+
+ type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None
+ """The content type (`input_text`, `input_audio`, `item_reference`, `text`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py
new file mode 100644
index 00000000..7a3a92a3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ConversationItemContentParam"]
+
+
+class ConversationItemContentParam(TypedDict, total=False):
+ id: str
+ """
+ ID of a previous conversation item to reference (for `item_reference` content
+ types in `response.create` events). These can reference both client and server
+ created items.
+ """
+
+ audio: str
+ """Base64-encoded audio bytes, used for `input_audio` content type."""
+
+ text: str
+ """The text content, used for `input_text` and `text` content types."""
+
+ transcript: str
+ """The transcript of the audio, used for `input_audio` content type."""
+
+ type: Literal["input_text", "input_audio", "item_reference", "text"]
+ """The content type (`input_text`, `input_audio`, `item_reference`, `text`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py
new file mode 100644
index 00000000..f19d552a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ConversationItemCreateEvent"]
+
+
+class ConversationItemCreateEvent(BaseModel):
+ item: ConversationItem
+ """The item to add to the conversation."""
+
+ type: Literal["conversation.item.create"]
+ """The event type, must be `conversation.item.create`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
+
+ previous_item_id: Optional[str] = None
+ """The ID of the preceding item after which the new item will be inserted.
+
+ If not set, the new item will be appended to the end of the conversation. If set
+ to `root`, the new item will be added to the beginning of the conversation. If
+ set to an existing ID, it allows an item to be inserted mid-conversation. If the
+ ID cannot be found, an error will be returned and the item will not be added.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py
new file mode 100644
index 00000000..693d0fd5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .conversation_item_param import ConversationItemParam
+
+__all__ = ["ConversationItemCreateEventParam"]
+
+
+class ConversationItemCreateEventParam(TypedDict, total=False):
+ item: Required[ConversationItemParam]
+ """The item to add to the conversation."""
+
+ type: Required[Literal["conversation.item.create"]]
+ """The event type, must be `conversation.item.create`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
+
+ previous_item_id: str
+ """The ID of the preceding item after which the new item will be inserted.
+
+ If not set, the new item will be appended to the end of the conversation. If set
+ to `root`, the new item will be added to the beginning of the conversation. If
+ set to an existing ID, it allows an item to be inserted mid-conversation. If the
+ ID cannot be found, an error will be returned and the item will not be added.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py
new file mode 100644
index 00000000..2f203882
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ConversationItemCreatedEvent"]
+
+
+class ConversationItemCreatedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ item: ConversationItem
+ """The item to add to the conversation."""
+
+ previous_item_id: str
+ """
+    The ID of the preceding item in the conversation context; this allows the client
+    to understand the order of the conversation.
+ """
+
+ type: Literal["conversation.item.created"]
+ """The event type, must be `conversation.item.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py
new file mode 100644
index 00000000..02ca8250
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemDeleteEvent"]
+
+
+class ConversationItemDeleteEvent(BaseModel):
+ item_id: str
+ """The ID of the item to delete."""
+
+ type: Literal["conversation.item.delete"]
+ """The event type, must be `conversation.item.delete`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py
new file mode 100644
index 00000000..c3f88d66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConversationItemDeleteEventParam"]
+
+
+class ConversationItemDeleteEventParam(TypedDict, total=False):
+ item_id: Required[str]
+ """The ID of the item to delete."""
+
+ type: Required[Literal["conversation.item.delete"]]
+ """The event type, must be `conversation.item.delete`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py
new file mode 100644
index 00000000..a35a9781
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemDeletedEvent"]
+
+
+class ConversationItemDeletedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item that was deleted."""
+
+ type: Literal["conversation.item.deleted"]
+ """The event type, must be `conversation.item.deleted`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py
new file mode 100644
index 00000000..46981169
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+ token: str
+ """The token that was used to generate the log probability."""
+
+ bytes: List[int]
+ """The bytes that were used to generate the log probability."""
+
+ logprob: float
+ """The log probability of the token."""
+
+
+class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel):
+ content_index: int
+ """The index of the content part containing the audio."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the user message item containing the audio."""
+
+ transcript: str
+ """The transcribed text."""
+
+ type: Literal["conversation.item.input_audio_transcription.completed"]
+ """
+ The event type, must be `conversation.item.input_audio_transcription.completed`.
+ """
+
+ logprobs: Optional[List[Logprob]] = None
+ """The log probabilities of the transcription."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py
new file mode 100644
index 00000000..924d06d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemInputAudioTranscriptionDeltaEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+ token: str
+ """The token that was used to generate the log probability."""
+
+ bytes: List[int]
+ """The bytes that were used to generate the log probability."""
+
+ logprob: float
+ """The log probability of the token."""
+
+
+class ConversationItemInputAudioTranscriptionDeltaEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ type: Literal["conversation.item.input_audio_transcription.delta"]
+ """The event type, must be `conversation.item.input_audio_transcription.delta`."""
+
+ content_index: Optional[int] = None
+ """The index of the content part in the item's content array."""
+
+ delta: Optional[str] = None
+ """The text delta."""
+
+ logprobs: Optional[List[Logprob]] = None
+ """The log probabilities of the transcription."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py
new file mode 100644
index 00000000..cecac93e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemInputAudioTranscriptionFailedEvent", "Error"]
+
+
+class Error(BaseModel):
+ code: Optional[str] = None
+ """Error code, if any."""
+
+ message: Optional[str] = None
+ """A human-readable error message."""
+
+ param: Optional[str] = None
+ """Parameter related to the error, if any."""
+
+ type: Optional[str] = None
+ """The type of error."""
+
+
+class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel):
+ content_index: int
+ """The index of the content part containing the audio."""
+
+ error: Error
+ """Details of the transcription error."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the user message item."""
+
+ type: Literal["conversation.item.input_audio_transcription.failed"]
+ """The event type, must be `conversation.item.input_audio_transcription.failed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py
new file mode 100644
index 00000000..ac0f8431
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py
@@ -0,0 +1,62 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, TypedDict
+
+from .conversation_item_content_param import ConversationItemContentParam
+
+__all__ = ["ConversationItemParam"]
+
+
+class ConversationItemParam(TypedDict, total=False):
+ id: str
+ """
+    The unique ID of the item. This can be generated by the client to help manage
+    server-side context, but is not required because the server will generate one if
+    not provided.
+ """
+
+ arguments: str
+ """The arguments of the function call (for `function_call` items)."""
+
+ call_id: str
+ """
+ The ID of the function call (for `function_call` and `function_call_output`
+ items). If passed on a `function_call_output` item, the server will check that a
+ `function_call` item with the same ID exists in the conversation history.
+ """
+
+ content: Iterable[ConversationItemContentParam]
+ """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content.
+    - Message items of role `user` support `input_text` and `input_audio` content.
+ - Message items of role `assistant` support `text` content.
+ """
+
+ name: str
+ """The name of the function being called (for `function_call` items)."""
+
+ object: Literal["realtime.item"]
+ """Identifier for the API object being returned - always `realtime.item`."""
+
+ output: str
+ """The output of the function call (for `function_call_output` items)."""
+
+ role: Literal["user", "assistant", "system"]
+ """
+ The role of the message sender (`user`, `assistant`, `system`), only applicable
+ for `message` items.
+ """
+
+ status: Literal["completed", "incomplete"]
+ """The status of the item (`completed`, `incomplete`).
+
+ These have no effect on the conversation, but are accepted for consistency with
+ the `conversation.item.created` event.
+ """
+
+ type: Literal["message", "function_call", "function_call_output"]
+ """The type of the item (`message`, `function_call`, `function_call_output`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py
new file mode 100644
index 00000000..82238605
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemRetrieveEvent"]
+
+
+class ConversationItemRetrieveEvent(BaseModel):
+ item_id: str
+ """The ID of the item to retrieve."""
+
+ type: Literal["conversation.item.retrieve"]
+ """The event type, must be `conversation.item.retrieve`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py
new file mode 100644
index 00000000..71b3ffa4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConversationItemRetrieveEventParam"]
+
+
+class ConversationItemRetrieveEventParam(TypedDict, total=False):
+ item_id: Required[str]
+ """The ID of the item to retrieve."""
+
+ type: Required[Literal["conversation.item.retrieve"]]
+ """The event type, must be `conversation.item.retrieve`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py
new file mode 100644
index 00000000..cb336bba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemTruncateEvent"]
+
+
+class ConversationItemTruncateEvent(BaseModel):
+ audio_end_ms: int
+ """Inclusive duration up to which audio is truncated, in milliseconds.
+
+ If the audio_end_ms is greater than the actual audio duration, the server will
+ respond with an error.
+ """
+
+ content_index: int
+ """The index of the content part to truncate. Set this to 0."""
+
+ item_id: str
+ """The ID of the assistant message item to truncate.
+
+ Only assistant message items can be truncated.
+ """
+
+ type: Literal["conversation.item.truncate"]
+ """The event type, must be `conversation.item.truncate`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py
new file mode 100644
index 00000000..d3ad1e1e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConversationItemTruncateEventParam"]
+
+
+class ConversationItemTruncateEventParam(TypedDict, total=False):
+ audio_end_ms: Required[int]
+ """Inclusive duration up to which audio is truncated, in milliseconds.
+
+ If the audio_end_ms is greater than the actual audio duration, the server will
+ respond with an error.
+ """
+
+ content_index: Required[int]
+ """The index of the content part to truncate. Set this to 0."""
+
+ item_id: Required[str]
+ """The ID of the assistant message item to truncate.
+
+ Only assistant message items can be truncated.
+ """
+
+ type: Required[Literal["conversation.item.truncate"]]
+ """The event type, must be `conversation.item.truncate`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py
new file mode 100644
index 00000000..36368fa2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemTruncatedEvent"]
+
+
+class ConversationItemTruncatedEvent(BaseModel):
+ audio_end_ms: int
+ """The duration up to which the audio was truncated, in milliseconds."""
+
+ content_index: int
+ """The index of the content part that was truncated."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the assistant message item that was truncated."""
+
+ type: Literal["conversation.item.truncated"]
+ """The event type, must be `conversation.item.truncated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py
new file mode 100644
index 00000000..31806afc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item_content import ConversationItemContent
+
+__all__ = ["ConversationItemWithReference"]
+
+
+class ConversationItemWithReference(BaseModel):
+ id: Optional[str] = None
+ """
+    For an item of type (`message` | `function_call` | `function_call_output`), this
+ field allows the client to assign the unique ID of the item. It is not required
+ because the server will generate one if not provided.
+
+ For an item of type `item_reference`, this field is required and is a reference
+ to any item that has previously existed in the conversation.
+ """
+
+ arguments: Optional[str] = None
+ """The arguments of the function call (for `function_call` items)."""
+
+ call_id: Optional[str] = None
+ """
+ The ID of the function call (for `function_call` and `function_call_output`
+ items). If passed on a `function_call_output` item, the server will check that a
+ `function_call` item with the same ID exists in the conversation history.
+ """
+
+ content: Optional[List[ConversationItemContent]] = None
+ """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content.
+    - Message items of role `user` support `input_text` and `input_audio` content.
+ - Message items of role `assistant` support `text` content.
+ """
+
+ name: Optional[str] = None
+ """The name of the function being called (for `function_call` items)."""
+
+ object: Optional[Literal["realtime.item"]] = None
+ """Identifier for the API object being returned - always `realtime.item`."""
+
+ output: Optional[str] = None
+ """The output of the function call (for `function_call_output` items)."""
+
+ role: Optional[Literal["user", "assistant", "system"]] = None
+ """
+ The role of the message sender (`user`, `assistant`, `system`), only applicable
+ for `message` items.
+ """
+
+ status: Optional[Literal["completed", "incomplete"]] = None
+ """The status of the item (`completed`, `incomplete`).
+
+ These have no effect on the conversation, but are accepted for consistency with
+ the `conversation.item.created` event.
+ """
+
+ type: Optional[Literal["message", "function_call", "function_call_output", "item_reference"]] = None
+ """
+ The type of the item (`message`, `function_call`, `function_call_output`,
+ `item_reference`).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py
new file mode 100644
index 00000000..e266cdce
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py
@@ -0,0 +1,68 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, TypedDict
+
+from .conversation_item_content_param import ConversationItemContentParam
+
+__all__ = ["ConversationItemWithReferenceParam"]
+
+
+class ConversationItemWithReferenceParam(TypedDict, total=False):
+ id: str
+ """
+    For an item of type (`message` | `function_call` | `function_call_output`), this
+ field allows the client to assign the unique ID of the item. It is not required
+ because the server will generate one if not provided.
+
+ For an item of type `item_reference`, this field is required and is a reference
+ to any item that has previously existed in the conversation.
+ """
+
+ arguments: str
+ """The arguments of the function call (for `function_call` items)."""
+
+ call_id: str
+ """
+ The ID of the function call (for `function_call` and `function_call_output`
+ items). If passed on a `function_call_output` item, the server will check that a
+ `function_call` item with the same ID exists in the conversation history.
+ """
+
+ content: Iterable[ConversationItemContentParam]
+ """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content.
+    - Message items of role `user` support `input_text` and `input_audio` content.
+ - Message items of role `assistant` support `text` content.
+ """
+
+ name: str
+ """The name of the function being called (for `function_call` items)."""
+
+ object: Literal["realtime.item"]
+ """Identifier for the API object being returned - always `realtime.item`."""
+
+ output: str
+ """The output of the function call (for `function_call_output` items)."""
+
+ role: Literal["user", "assistant", "system"]
+ """
+ The role of the message sender (`user`, `assistant`, `system`), only applicable
+ for `message` items.
+ """
+
+ status: Literal["completed", "incomplete"]
+ """The status of the item (`completed`, `incomplete`).
+
+ These have no effect on the conversation, but are accepted for consistency with
+ the `conversation.item.created` event.
+ """
+
+ type: Literal["message", "function_call", "function_call_output", "item_reference"]
+ """
+ The type of the item (`message`, `function_call`, `function_call_output`,
+ `item_reference`).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py
new file mode 100644
index 00000000..e020fc38
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ErrorEvent", "Error"]
+
+
+class Error(BaseModel):
+ message: str
+ """A human-readable error message."""
+
+ type: str
+ """The type of error (e.g., "invalid_request_error", "server_error")."""
+
+ code: Optional[str] = None
+ """Error code, if any."""
+
+ event_id: Optional[str] = None
+ """The event_id of the client event that caused the error, if applicable."""
+
+ param: Optional[str] = None
+ """Parameter related to the error, if any."""
+
+
+class ErrorEvent(BaseModel):
+ error: Error
+ """Details of the error."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ type: Literal["error"]
+ """The event type, must be `error`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py
new file mode 100644
index 00000000..a253a648
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferAppendEvent"]
+
+
+class InputAudioBufferAppendEvent(BaseModel):
+ audio: str
+ """Base64-encoded audio bytes.
+
+ This must be in the format specified by the `input_audio_format` field in the
+ session configuration.
+ """
+
+ type: Literal["input_audio_buffer.append"]
+ """The event type, must be `input_audio_buffer.append`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py
new file mode 100644
index 00000000..3ad0bc73
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InputAudioBufferAppendEventParam"]
+
+
+class InputAudioBufferAppendEventParam(TypedDict, total=False):
+ audio: Required[str]
+ """Base64-encoded audio bytes.
+
+ This must be in the format specified by the `input_audio_format` field in the
+ session configuration.
+ """
+
+ type: Required[Literal["input_audio_buffer.append"]]
+ """The event type, must be `input_audio_buffer.append`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py
new file mode 100644
index 00000000..b0624d34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferClearEvent"]
+
+
+class InputAudioBufferClearEvent(BaseModel):
+ type: Literal["input_audio_buffer.clear"]
+ """The event type, must be `input_audio_buffer.clear`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py
new file mode 100644
index 00000000..2bd6bc5a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InputAudioBufferClearEventParam"]
+
+
+class InputAudioBufferClearEventParam(TypedDict, total=False):
+ type: Required[Literal["input_audio_buffer.clear"]]
+ """The event type, must be `input_audio_buffer.clear`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py
new file mode 100644
index 00000000..632e1b94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferClearedEvent"]
+
+
+class InputAudioBufferClearedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ type: Literal["input_audio_buffer.cleared"]
+ """The event type, must be `input_audio_buffer.cleared`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py
new file mode 100644
index 00000000..7b6f5e46
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferCommitEvent"]
+
+
+class InputAudioBufferCommitEvent(BaseModel):
+ type: Literal["input_audio_buffer.commit"]
+ """The event type, must be `input_audio_buffer.commit`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py
new file mode 100644
index 00000000..c9c927ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InputAudioBufferCommitEventParam"]
+
+
+class InputAudioBufferCommitEventParam(TypedDict, total=False):
+ type: Required[Literal["input_audio_buffer.commit"]]
+ """The event type, must be `input_audio_buffer.commit`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py
new file mode 100644
index 00000000..3071eff3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferCommittedEvent"]
+
+
+class InputAudioBufferCommittedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the user message item that will be created."""
+
+ previous_item_id: str
+ """The ID of the preceding item after which the new item will be inserted."""
+
+ type: Literal["input_audio_buffer.committed"]
+ """The event type, must be `input_audio_buffer.committed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py
new file mode 100644
index 00000000..4f3ab082
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferSpeechStartedEvent"]
+
+
+class InputAudioBufferSpeechStartedEvent(BaseModel):
+ audio_start_ms: int
+ """
+ Milliseconds from the start of all audio written to the buffer during the
+ session when speech was first detected. This will correspond to the beginning of
+ audio sent to the model, and thus includes the `prefix_padding_ms` configured in
+ the Session.
+ """
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the user message item that will be created when speech stops."""
+
+ type: Literal["input_audio_buffer.speech_started"]
+ """The event type, must be `input_audio_buffer.speech_started`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py
new file mode 100644
index 00000000..40568170
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferSpeechStoppedEvent"]
+
+
+class InputAudioBufferSpeechStoppedEvent(BaseModel):
+ audio_end_ms: int
+ """Milliseconds since the session started when speech stopped.
+
+ This will correspond to the end of audio sent to the model, and thus includes
+ the `min_silence_duration_ms` configured in the Session.
+ """
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the user message item that will be created."""
+
+ type: Literal["input_audio_buffer.speech_stopped"]
+ """The event type, must be `input_audio_buffer.speech_stopped`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py
new file mode 100644
index 00000000..7e12283c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RateLimitsUpdatedEvent", "RateLimit"]
+
+
+class RateLimit(BaseModel):
+ limit: Optional[int] = None
+ """The maximum allowed value for the rate limit."""
+
+ name: Optional[Literal["requests", "tokens"]] = None
+ """The name of the rate limit (`requests`, `tokens`)."""
+
+ remaining: Optional[int] = None
+ """The remaining value before the limit is reached."""
+
+ reset_seconds: Optional[float] = None
+ """Seconds until the rate limit resets."""
+
+
+class RateLimitsUpdatedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ rate_limits: List[RateLimit]
+ """List of rate limit information."""
+
+ type: Literal["rate_limits.updated"]
+ """The event type, must be `rate_limits.updated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py
new file mode 100644
index 00000000..f962a505
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .session_update_event import SessionUpdateEvent
+from .response_cancel_event import ResponseCancelEvent
+from .response_create_event import ResponseCreateEvent
+from .transcription_session_update import TranscriptionSessionUpdate
+from .conversation_item_create_event import ConversationItemCreateEvent
+from .conversation_item_delete_event import ConversationItemDeleteEvent
+from .input_audio_buffer_clear_event import InputAudioBufferClearEvent
+from .input_audio_buffer_append_event import InputAudioBufferAppendEvent
+from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent
+from .conversation_item_retrieve_event import ConversationItemRetrieveEvent
+from .conversation_item_truncate_event import ConversationItemTruncateEvent
+
+__all__ = ["RealtimeClientEvent"]
+
+RealtimeClientEvent: TypeAlias = Annotated[
+ Union[
+ ConversationItemCreateEvent,
+ ConversationItemDeleteEvent,
+ ConversationItemRetrieveEvent,
+ ConversationItemTruncateEvent,
+ InputAudioBufferAppendEvent,
+ InputAudioBufferClearEvent,
+ InputAudioBufferCommitEvent,
+ ResponseCancelEvent,
+ ResponseCreateEvent,
+ SessionUpdateEvent,
+ TranscriptionSessionUpdate,
+ ],
+ PropertyInfo(discriminator="type"),
+]
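`PropertyInfo(discriminator="type")` tags the union so each member is told apart by its `type` literal; the same field supports narrowing in client code. An illustrative sketch:

from openai.types.beta.realtime.realtime_client_event import RealtimeClientEvent

def describe(event: RealtimeClientEvent) -> str:
    # Comparing against the `type` literal narrows the union, both for type
    # checkers and at runtime.
    if event.type == "input_audio_buffer.append":
        return f"append: {len(event.audio)} base64 chars"
    return event.type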
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py
new file mode 100644
index 00000000..6fdba4b8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .session_update_event_param import SessionUpdateEventParam
+from .response_cancel_event_param import ResponseCancelEventParam
+from .response_create_event_param import ResponseCreateEventParam
+from .transcription_session_update_param import TranscriptionSessionUpdateParam
+from .conversation_item_create_event_param import ConversationItemCreateEventParam
+from .conversation_item_delete_event_param import ConversationItemDeleteEventParam
+from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam
+from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam
+from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam
+from .conversation_item_retrieve_event_param import ConversationItemRetrieveEventParam
+from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam
+
+__all__ = ["RealtimeClientEventParam"]
+
+RealtimeClientEventParam: TypeAlias = Union[
+ ConversationItemCreateEventParam,
+ ConversationItemDeleteEventParam,
+ ConversationItemRetrieveEventParam,
+ ConversationItemTruncateEventParam,
+ InputAudioBufferAppendEventParam,
+ InputAudioBufferClearEventParam,
+ InputAudioBufferCommitEventParam,
+ ResponseCancelEventParam,
+ ResponseCreateEventParam,
+ SessionUpdateEventParam,
+ TranscriptionSessionUpdateParam,
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py
new file mode 100644
index 00000000..76474f3d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RealtimeConnectParams"]
+
+
+class RealtimeConnectParams(TypedDict, total=False):
+ model: Required[str]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py
new file mode 100644
index 00000000..4c3c83d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py
@@ -0,0 +1,87 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.metadata import Metadata
+from .conversation_item import ConversationItem
+from .realtime_response_usage import RealtimeResponseUsage
+from .realtime_response_status import RealtimeResponseStatus
+
+__all__ = ["RealtimeResponse"]
+
+
+class RealtimeResponse(BaseModel):
+ id: Optional[str] = None
+ """The unique ID of the response."""
+
+ conversation_id: Optional[str] = None
+ """
+ Which conversation the response is added to, determined by the `conversation`
+ field in the `response.create` event. If `auto`, the response will be added to
+ the default conversation and the value of `conversation_id` will be an id like
+ `conv_1234`. If `none`, the response will not be added to any conversation and
+ the value of `conversation_id` will be `null`. If responses are being triggered
+ by server VAD, the response will be added to the default conversation, thus the
+ `conversation_id` will be an id like `conv_1234`.
+ """
+
+ max_output_tokens: Union[int, Literal["inf"], None] = None
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls, that was used in this response.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model used to respond.
+
+ If there are multiple modalities, the model will pick one; for example, if
+ `modalities` is `["text", "audio"]`, the model could be responding in either
+ text or audio.
+ """
+
+ object: Optional[Literal["realtime.response"]] = None
+ """The object type, must be `realtime.response`."""
+
+ output: Optional[List[ConversationItem]] = None
+ """The list of output items generated by the response."""
+
+ output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+ status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None
+ """
+ The final status of the response (`completed`, `cancelled`, `failed`, or
+ `incomplete`).
+ """
+
+ status_details: Optional[RealtimeResponseStatus] = None
+ """Additional details about the status."""
+
+ temperature: Optional[float] = None
+ """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+ usage: Optional[RealtimeResponseUsage] = None
+ """Usage statistics for the Response, this will correspond to billing.
+
+ A Realtime API session will maintain a conversation context and append new Items
+ to the Conversation, thus output from previous turns (text and audio tokens)
+ will become the input for later turns.
+ """
+
+ voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+ """
+ The voice the model used to respond. Current voice options are `alloy`, `ash`,
+ `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py
new file mode 100644
index 00000000..7189cd58
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RealtimeResponseStatus", "Error"]
+
+
+class Error(BaseModel):
+ code: Optional[str] = None
+ """Error code, if any."""
+
+ type: Optional[str] = None
+ """The type of error."""
+
+
+class RealtimeResponseStatus(BaseModel):
+ error: Optional[Error] = None
+ """
+ A description of the error that caused the response to fail, populated when the
+ `status` is `failed`.
+ """
+
+ reason: Optional[Literal["turn_detected", "client_cancelled", "max_output_tokens", "content_filter"]] = None
+ """The reason the Response did not complete.
+
+ For a `cancelled` Response, one of `turn_detected` (the server VAD detected a
+ new start of speech) or `client_cancelled` (the client sent a cancel event). For
+ an `incomplete` Response, one of `max_output_tokens` or `content_filter` (the
+ server-side safety filter activated and cut off the response).
+ """
+
+ type: Optional[Literal["completed", "cancelled", "incomplete", "failed"]] = None
+ """
+ The final status of the response, corresponding with the
+ `status` field (`completed`, `cancelled`, `incomplete`, `failed`).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py
new file mode 100644
index 00000000..7ca822e2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["RealtimeResponseUsage", "InputTokenDetails", "OutputTokenDetails"]
+
+
+class InputTokenDetails(BaseModel):
+ audio_tokens: Optional[int] = None
+ """The number of audio tokens used in the Response."""
+
+ cached_tokens: Optional[int] = None
+ """The number of cached tokens used in the Response."""
+
+ text_tokens: Optional[int] = None
+ """The number of text tokens used in the Response."""
+
+
+class OutputTokenDetails(BaseModel):
+ audio_tokens: Optional[int] = None
+ """The number of audio tokens used in the Response."""
+
+ text_tokens: Optional[int] = None
+ """The number of text tokens used in the Response."""
+
+
+class RealtimeResponseUsage(BaseModel):
+ input_token_details: Optional[InputTokenDetails] = None
+ """Details about the input tokens used in the Response."""
+
+ input_tokens: Optional[int] = None
+ """
+ The number of input tokens used in the Response, including text and audio
+ tokens.
+ """
+
+ output_token_details: Optional[OutputTokenDetails] = None
+ """Details about the output tokens used in the Response."""
+
+ output_tokens: Optional[int] = None
+ """
+ The number of output tokens sent in the Response, including text and audio
+ tokens.
+ """
+
+ total_tokens: Optional[int] = None
+ """
+ The total number of tokens in the Response including input and output text and
+ audio tokens.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py
new file mode 100644
index 00000000..ba1d3244
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py
@@ -0,0 +1,91 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+from .error_event import ErrorEvent
+from .conversation_item import ConversationItem
+from .response_done_event import ResponseDoneEvent
+from .session_created_event import SessionCreatedEvent
+from .session_updated_event import SessionUpdatedEvent
+from .response_created_event import ResponseCreatedEvent
+from .response_text_done_event import ResponseTextDoneEvent
+from .rate_limits_updated_event import RateLimitsUpdatedEvent
+from .response_audio_done_event import ResponseAudioDoneEvent
+from .response_text_delta_event import ResponseTextDeltaEvent
+from .conversation_created_event import ConversationCreatedEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent
+from .conversation_item_created_event import ConversationItemCreatedEvent
+from .conversation_item_deleted_event import ConversationItemDeletedEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent
+from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent
+from .conversation_item_truncated_event import ConversationItemTruncatedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent
+from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent
+from .transcription_session_updated_event import TranscriptionSessionUpdatedEvent
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
+from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
+from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent
+from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent
+from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
+from .conversation_item_input_audio_transcription_delta_event import ConversationItemInputAudioTranscriptionDeltaEvent
+from .conversation_item_input_audio_transcription_failed_event import ConversationItemInputAudioTranscriptionFailedEvent
+from .conversation_item_input_audio_transcription_completed_event import (
+ ConversationItemInputAudioTranscriptionCompletedEvent,
+)
+
+__all__ = ["RealtimeServerEvent", "ConversationItemRetrieved"]
+
+
+class ConversationItemRetrieved(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ item: ConversationItem
+ """The item to add to the conversation."""
+
+ type: Literal["conversation.item.retrieved"]
+ """The event type, must be `conversation.item.retrieved`."""
+
+
+RealtimeServerEvent: TypeAlias = Annotated[
+ Union[
+ ConversationCreatedEvent,
+ ConversationItemCreatedEvent,
+ ConversationItemDeletedEvent,
+ ConversationItemInputAudioTranscriptionCompletedEvent,
+ ConversationItemInputAudioTranscriptionDeltaEvent,
+ ConversationItemInputAudioTranscriptionFailedEvent,
+ ConversationItemRetrieved,
+ ConversationItemTruncatedEvent,
+ ErrorEvent,
+ InputAudioBufferClearedEvent,
+ InputAudioBufferCommittedEvent,
+ InputAudioBufferSpeechStartedEvent,
+ InputAudioBufferSpeechStoppedEvent,
+ RateLimitsUpdatedEvent,
+ ResponseAudioDeltaEvent,
+ ResponseAudioDoneEvent,
+ ResponseAudioTranscriptDeltaEvent,
+ ResponseAudioTranscriptDoneEvent,
+ ResponseContentPartAddedEvent,
+ ResponseContentPartDoneEvent,
+ ResponseCreatedEvent,
+ ResponseDoneEvent,
+ ResponseFunctionCallArgumentsDeltaEvent,
+ ResponseFunctionCallArgumentsDoneEvent,
+ ResponseOutputItemAddedEvent,
+ ResponseOutputItemDoneEvent,
+ ResponseTextDeltaEvent,
+ ResponseTextDoneEvent,
+ SessionCreatedEvent,
+ SessionUpdatedEvent,
+ TranscriptionSessionUpdatedEvent,
+ ],
+ PropertyInfo(discriminator="type"),
+]
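The shared `type` discriminator makes a flat dispatch over incoming server events straightforward. A minimal sketch (only the event types come from this module; the handling itself is illustrative):

from openai.types.beta.realtime.realtime_server_event import RealtimeServerEvent

def handle(event: RealtimeServerEvent) -> None:
    if event.type == "response.text.delta":
        print(event.delta, end="", flush=True)
    elif event.type == "response.done":
        print()  # turn finished; token usage is on event.response.usage
    elif event.type == "error":
        raise RuntimeError(f"realtime error: {event.error.message}")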
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py
new file mode 100644
index 00000000..8e0128d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioDeltaEvent"]
+
+
+class ResponseAudioDeltaEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ delta: str
+ """Base64-encoded audio data delta."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.audio.delta"]
+ """The event type, must be `response.audio.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py
new file mode 100644
index 00000000..68e78bc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioDoneEvent"]
+
+
+class ResponseAudioDoneEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.audio.done"]
+ """The event type, must be `response.audio.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py
new file mode 100644
index 00000000..3609948d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDeltaEvent"]
+
+
+class ResponseAudioTranscriptDeltaEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ delta: str
+ """The transcript delta."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.audio_transcript.delta"]
+ """The event type, must be `response.audio_transcript.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py
new file mode 100644
index 00000000..4e4436a9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDoneEvent"]
+
+
+class ResponseAudioTranscriptDoneEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ transcript: str
+ """The final transcript of the audio."""
+
+ type: Literal["response.audio_transcript.done"]
+ """The event type, must be `response.audio_transcript.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py
new file mode 100644
index 00000000..c5ff991e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseCancelEvent"]
+
+
+class ResponseCancelEvent(BaseModel):
+ type: Literal["response.cancel"]
+ """The event type, must be `response.cancel`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
+
+ response_id: Optional[str] = None
+ """
+ A specific response ID to cancel; if not provided, this will cancel an
+ in-progress response in the default conversation.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py
new file mode 100644
index 00000000..f3374073
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseCancelEventParam"]
+
+
+class ResponseCancelEventParam(TypedDict, total=False):
+ type: Required[Literal["response.cancel"]]
+ """The event type, must be `response.cancel`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
+
+ response_id: str
+ """
+ A specific response ID to cancel; if not provided, this will cancel an
+ in-progress response in the default conversation.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py
new file mode 100644
index 00000000..45c8f20f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseContentPartAddedEvent", "Part"]
+
+
+class Part(BaseModel):
+ audio: Optional[str] = None
+ """Base64-encoded audio data (if type is "audio")."""
+
+ text: Optional[str] = None
+ """The text content (if type is "text")."""
+
+ transcript: Optional[str] = None
+ """The transcript of the audio (if type is "audio")."""
+
+ type: Optional[Literal["text", "audio"]] = None
+ """The content type ("text", "audio")."""
+
+
+class ResponseContentPartAddedEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item to which the content part was added."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ part: Part
+ """The content part that was added."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.content_part.added"]
+ """The event type, must be `response.content_part.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py
new file mode 100644
index 00000000..3d161161
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseContentPartDoneEvent", "Part"]
+
+
+class Part(BaseModel):
+ audio: Optional[str] = None
+ """Base64-encoded audio data (if type is "audio")."""
+
+ text: Optional[str] = None
+ """The text content (if type is "text")."""
+
+ transcript: Optional[str] = None
+ """The transcript of the audio (if type is "audio")."""
+
+ type: Optional[Literal["text", "audio"]] = None
+ """The content type ("text", "audio")."""
+
+
+class ResponseContentPartDoneEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ part: Part
+ """The content part that is done."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.content_part.done"]
+ """The event type, must be `response.content_part.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py
new file mode 100644
index 00000000..d6c5fda9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py
@@ -0,0 +1,121 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.metadata import Metadata
+from .conversation_item_with_reference import ConversationItemWithReference
+
+__all__ = ["ResponseCreateEvent", "Response", "ResponseTool"]
+
+
+class ResponseTool(BaseModel):
+ description: Optional[str] = None
+ """
+ The description of the function, including guidance on when and how to call it,
+ and guidance about what to tell the user when calling (if anything).
+ """
+
+ name: Optional[str] = None
+ """The name of the function."""
+
+ parameters: Optional[object] = None
+ """Parameters of the function in JSON Schema."""
+
+ type: Optional[Literal["function"]] = None
+ """The type of the tool, i.e. `function`."""
+
+
+class Response(BaseModel):
+ conversation: Union[str, Literal["auto", "none"], None] = None
+ """Controls which conversation the response is added to.
+
+ Currently supports `auto` and `none`, with `auto` as the default value. The
+ `auto` value means that the contents of the response will be added to the
+ default conversation. Set this to `none` to create an out-of-band response which
+ will not add items to the default conversation.
+ """
+
+ input: Optional[List[ConversationItemWithReference]] = None
+ """Input items to include in the prompt for the model.
+
+ Using this field creates a new context for this Response instead of using the
+ default conversation. An empty array `[]` will clear the context for this
+ Response. Note that this can include references to items from the default
+ conversation.
+ """
+
+ instructions: Optional[str] = None
+ """The default system instructions (i.e.
+
+ system message) prepended to model calls. This field allows the client to guide
+ the model on desired responses. The model can be instructed on response content
+ and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+ good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ into your voice", "laugh frequently"). The instructions are not guaranteed to be
+ followed by the model, but they provide guidance to the model on the desired
+ behavior.
+
+ Note that the server sets default instructions which will be used if this field
+ is not set and are visible in the `session.created` event at the start of the
+ session.
+ """
+
+ max_response_output_tokens: Union[int, Literal["inf"], None] = None
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+ temperature: Optional[float] = None
+ """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+ tool_choice: Optional[str] = None
+ """How the model chooses tools.
+
+ Options are `auto`, `none`, `required`, or specify a function, like
+ `{"type": "function", "function": {"name": "my_function"}}`.
+ """
+
+ tools: Optional[List[ResponseTool]] = None
+ """Tools (functions) available to the model."""
+
+ voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+ """The voice the model uses to respond.
+
+ Voice cannot be changed during the session once the model has responded with
+ audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+ `coral`, `echo`, `sage`, `shimmer` and `verse`.
+ """
+
+
+class ResponseCreateEvent(BaseModel):
+ type: Literal["response.create"]
+ """The event type, must be `response.create`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
+
+ response: Optional[Response] = None
+ """Create a new Realtime response with these parameters"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py
new file mode 100644
index 00000000..c02fe1b3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py
@@ -0,0 +1,122 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ...shared_params.metadata import Metadata
+from .conversation_item_with_reference_param import ConversationItemWithReferenceParam
+
+__all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"]
+
+
+class ResponseTool(TypedDict, total=False):
+ description: str
+ """
+ The description of the function, including guidance on when and how to call it,
+ and guidance about what to tell the user when calling (if anything).
+ """
+
+ name: str
+ """The name of the function."""
+
+ parameters: object
+ """Parameters of the function in JSON Schema."""
+
+ type: Literal["function"]
+ """The type of the tool, i.e. `function`."""
+
+
+class Response(TypedDict, total=False):
+ conversation: Union[str, Literal["auto", "none"]]
+ """Controls which conversation the response is added to.
+
+ Currently supports `auto` and `none`, with `auto` as the default value. The
+ `auto` value means that the contents of the response will be added to the
+ default conversation. Set this to `none` to create an out-of-band response which
+ will not add items to the default conversation.
+ """
+
+ input: Iterable[ConversationItemWithReferenceParam]
+ """Input items to include in the prompt for the model.
+
+ Using this field creates a new context for this Response instead of using the
+ default conversation. An empty array `[]` will clear the context for this
+ Response. Note that this can include references to items from the default
+ conversation.
+ """
+
+ instructions: str
+ """The default system instructions (i.e.
+
+ system message) prepended to model calls. This field allows the client to guide
+ the model on desired responses. The model can be instructed on response content
+ and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+ good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ into your voice", "laugh frequently"). The instructions are not guaranteed to be
+ followed by the model, but they provide guidance to the model on the desired
+ behavior.
+
+ Note that the server sets default instructions which will be used if this field
+ is not set and are visible in the `session.created` event at the start of the
+ session.
+ """
+
+ max_response_output_tokens: Union[int, Literal["inf"]]
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ modalities: List[Literal["text", "audio"]]
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+ """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+ temperature: float
+ """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+ tool_choice: str
+ """How the model chooses tools.
+
+ Options are `auto`, `none`, `required`, or specify a function, like
+ `{"type": "function", "function": {"name": "my_function"}}`.
+ """
+
+ tools: Iterable[ResponseTool]
+ """Tools (functions) available to the model."""
+
+ voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+ """The voice the model uses to respond.
+
+ Voice cannot be changed during the session once the model has responded with
+ audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+ `coral`, `echo`, `sage`, `shimmer` and `verse`.
+ """
+
+
+class ResponseCreateEventParam(TypedDict, total=False):
+ type: Required[Literal["response.create"]]
+ """The event type, must be `response.create`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
+
+ response: Response
+ """Create a new Realtime response with these parameters"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py
new file mode 100644
index 00000000..a4990cf0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .realtime_response import RealtimeResponse
+
+__all__ = ["ResponseCreatedEvent"]
+
+
+class ResponseCreatedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ response: RealtimeResponse
+ """The response resource."""
+
+ type: Literal["response.created"]
+ """The event type, must be `response.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py
new file mode 100644
index 00000000..9e655184
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .realtime_response import RealtimeResponse
+
+__all__ = ["ResponseDoneEvent"]
+
+
+class ResponseDoneEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ response: RealtimeResponse
+ """The response resource."""
+
+ type: Literal["response.done"]
+ """The event type, must be `response.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py
new file mode 100644
index 00000000..cdbb64e6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"]
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
+ call_id: str
+ """The ID of the function call."""
+
+ delta: str
+ """The arguments delta as a JSON string."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the function call item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.function_call_arguments.delta"]
+ """The event type, must be `response.function_call_arguments.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py
new file mode 100644
index 00000000..0a5db533
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDoneEvent"]
+
+
+class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
+ arguments: str
+ """The final arguments as a JSON string."""
+
+ call_id: str
+ """The ID of the function call."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the function call item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.function_call_arguments.done"]
+ """The event type, must be `response.function_call_arguments.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py
new file mode 100644
index 00000000..c89bfdc3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ResponseOutputItemAddedEvent"]
+
+
+class ResponseOutputItemAddedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ item: ConversationItem
+ """The item to add to the conversation."""
+
+ output_index: int
+ """The index of the output item in the Response."""
+
+ response_id: str
+ """The ID of the Response to which the item belongs."""
+
+ type: Literal["response.output_item.added"]
+ """The event type, must be `response.output_item.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py
new file mode 100644
index 00000000..b5910e22
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ResponseOutputItemDoneEvent"]
+
+
+class ResponseOutputItemDoneEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ item: ConversationItem
+ """The item to add to the conversation."""
+
+ output_index: int
+ """The index of the output item in the Response."""
+
+ response_id: str
+ """The ID of the Response to which the item belongs."""
+
+ type: Literal["response.output_item.done"]
+ """The event type, must be `response.output_item.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py
new file mode 100644
index 00000000..c463b3c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseTextDeltaEvent"]
+
+
+class ResponseTextDeltaEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ delta: str
+ """The text delta."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ type: Literal["response.text.delta"]
+ """The event type, must be `response.text.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py
new file mode 100644
index 00000000..020ff41d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseTextDoneEvent"]
+
+
+class ResponseTextDoneEvent(BaseModel):
+ content_index: int
+ """The index of the content part in the item's content array."""
+
+ event_id: str
+ """The unique ID of the server event."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item in the response."""
+
+ response_id: str
+ """The ID of the response."""
+
+ text: str
+ """The final text content."""
+
+ type: Literal["response.text.done"]
+ """The event type, must be `response.text.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py
new file mode 100644
index 00000000..3ed53ff5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py
@@ -0,0 +1,227 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["Session", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", "TurnDetection"]
+
+
+class InputAudioNoiseReduction(BaseModel):
+ type: Optional[Literal["near_field", "far_field"]] = None
+ """Type of noise reduction.
+
+ `near_field` is for close-talking microphones such as headphones, `far_field` is
+ for far-field microphones such as laptop or conference room microphones.
+ """
+
+
+class InputAudioTranscription(BaseModel):
+ language: Optional[str] = None
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: Optional[str] = None
+ """
+ The model to use for transcription, current options are `gpt-4o-transcribe`,
+ `gpt-4o-mini-transcribe`, and `whisper-1`.
+ """
+
+ prompt: Optional[str] = None
+ """
+ An optional text to guide the model's style or continue a previous audio
+ segment. For `whisper-1`, the
+ [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ "expect words related to technology".
+ """
+
+
+class Tool(BaseModel):
+ description: Optional[str] = None
+ """
+ The description of the function, including guidance on when and how to call it,
+ and guidance about what to tell the user when calling (if anything).
+ """
+
+ name: Optional[str] = None
+ """The name of the function."""
+
+ parameters: Optional[object] = None
+ """Parameters of the function in JSON Schema."""
+
+ type: Optional[Literal["function"]] = None
+ """The type of the tool, i.e. `function`."""
+
+
+class TurnDetection(BaseModel):
+ create_response: Optional[bool] = None
+ """
+ Whether or not to automatically generate a response when a VAD stop event
+ occurs.
+ """
+
+ eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
+ """Used only for `semantic_vad` mode.
+
+ The eagerness of the model to respond. `low` will wait longer for the user to
+ continue speaking, while `high` will respond more quickly. `auto` is the
+ default and is equivalent to `medium`.
+ """
+
+ interrupt_response: Optional[bool] = None
+ """
+ Whether or not to automatically interrupt any ongoing response with output to
+ the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ occurs.
+ """
+
+ prefix_padding_ms: Optional[int] = None
+ """Used only for `server_vad` mode.
+
+ Amount of audio to include before the VAD detected speech (in milliseconds).
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: Optional[int] = None
+ """Used only for `server_vad` mode.
+
+ Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ With shorter values the model will respond more quickly, but may jump in on
+ short pauses from the user.
+ """
+
+ threshold: Optional[float] = None
+ """Used only for `server_vad` mode.
+
+ Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+ threshold will require louder audio to activate the model, and thus might
+ perform better in noisy environments.
+ """
+
+ type: Optional[Literal["server_vad", "semantic_vad"]] = None
+ """Type of turn detection."""
+
+
+class Session(BaseModel):
+ id: Optional[str] = None
+ """Unique identifier for the session that looks like `sess_1234567890abcdef`."""
+
+ input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of input audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+ be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+ byte order.
+ """
+
+ input_audio_noise_reduction: Optional[InputAudioNoiseReduction] = None
+ """Configuration for input audio noise reduction.
+
+ This can be set to `null` to turn off. Noise reduction filters audio added to
+ the input audio buffer before it is sent to VAD and the model. Filtering the
+ audio can improve VAD and turn detection accuracy (reducing false positives) and
+ model performance by improving perception of the input audio.
+ """
+
+ input_audio_transcription: Optional[InputAudioTranscription] = None
+ """
+ Configuration for input audio transcription. This defaults to off and can be
+ set to `null` to turn it off once enabled. Input audio transcription is not
+ native to the model, since the model consumes audio directly. Transcription runs
+ asynchronously through
+ [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as guidance of input audio content rather than precisely
+ what the model heard. The client can optionally set the language and prompt for
+ transcription, these offer additional guidance to the transcription service.
+ """
+
+ instructions: Optional[str] = None
+ """The default system instructions (i.e.
+
+ system message) prepended to model calls. This field allows the client to guide
+ the model on desired responses. The model can be instructed on response content
+ and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+ good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ into your voice", "laugh frequently"). The instructions are not guaranteed to be
+ followed by the model, but they provide guidance to the model on the desired
+ behavior.
+
+ Note that the server sets default instructions which will be used if this field
+ is not set and are visible in the `session.created` event at the start of the
+ session.
+ """
+
+ max_response_output_tokens: Union[int, Literal["inf"], None] = None
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ model: Optional[
+ Literal[
+ "gpt-4o-realtime-preview",
+ "gpt-4o-realtime-preview-2024-10-01",
+ "gpt-4o-realtime-preview-2024-12-17",
+ "gpt-4o-mini-realtime-preview",
+ "gpt-4o-mini-realtime-preview-2024-12-17",
+ ]
+ ] = None
+ """The Realtime model used for this session."""
+
+ output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of output audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+ sampled at a rate of 24kHz.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature for the model, limited to [0.6, 1.2].
+
+ For audio models a temperature of 0.8 is highly recommended for best
+ performance.
+ """
+
+ tool_choice: Optional[str] = None
+ """How the model chooses tools.
+
+ Options are `auto`, `none`, `required`, or specify a function.
+ """
+
+ tools: Optional[List[Tool]] = None
+ """Tools (functions) available to the model."""
+
+ turn_detection: Optional[TurnDetection] = None
+ """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+ This can be set to `null` to turn off, in which case the client must manually
+ trigger model response. Server VAD means that the model will detect the start
+ and end of speech based on audio volume and respond at the end of user speech.
+ Semantic VAD is more advanced and uses a turn detection model (in conjuction
+ with VAD) to semantically estimate whether the user has finished speaking, then
+ dynamically sets a timeout based on this probability. For example, if user audio
+ trails off with "uhhm", the model will score a low probability of turn end and
+ wait longer for the user to continue speaking. This can be useful for more
+ natural conversations, but may have a higher latency.
+ """
+
+ voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+ """The voice the model uses to respond.
+
+ Voice cannot be changed during the session once the model has responded with
+ audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+ `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py
new file mode 100644
index 00000000..fe4a1c86
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py
@@ -0,0 +1,222 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["SessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", "TurnDetection"]
+
+
+class SessionCreateParams(TypedDict, total=False):
+ input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+ """The format of input audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+ be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+ byte order.
+ """
+
+ input_audio_noise_reduction: InputAudioNoiseReduction
+ """Configuration for input audio noise reduction.
+
+ This can be set to `null` to turn off. Noise reduction filters audio added to
+ the input audio buffer before it is sent to VAD and the model. Filtering the
+ audio can improve VAD and turn detection accuracy (reducing false positives) and
+ model performance by improving perception of the input audio.
+ """
+
+ input_audio_transcription: InputAudioTranscription
+ """
+ Configuration for input audio transcription, defaults to off and can be set to
+ `null` to turn off once enabled. Input audio transcription is not native to the
+ model, since the model consumes audio directly. Transcription runs
+ asynchronously through
+ [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as guidance of input audio content rather than precisely
+ what the model heard. The client can optionally set the language and prompt for
+ transcription; these offer additional guidance to the transcription service.
+ """
+
+ instructions: str
+ """The default system instructions (i.e.
+
+ system message) prepended to model calls. This field allows the client to guide
+ the model on desired responses. The model can be instructed on response content
+ and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+ good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ into your voice", "laugh frequently"). The instructions are not guaranteed to be
+ followed by the model, but they provide guidance to the model on the desired
+ behavior.
+
+ Note that the server sets default instructions which will be used if this field
+ is not set and are visible in the `session.created` event at the start of the
+ session.
+ """
+
+ max_response_output_tokens: Union[int, Literal["inf"]]
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ """
+
+ modalities: List[Literal["text", "audio"]]
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ model: Literal[
+ "gpt-4o-realtime-preview",
+ "gpt-4o-realtime-preview-2024-10-01",
+ "gpt-4o-realtime-preview-2024-12-17",
+ "gpt-4o-mini-realtime-preview",
+ "gpt-4o-mini-realtime-preview-2024-12-17",
+ ]
+ """The Realtime model used for this session."""
+
+ output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+ """The format of output audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+ sampled at a rate of 24kHz.
+ """
+
+ temperature: float
+ """Sampling temperature for the model, limited to [0.6, 1.2].
+
+ For audio models a temperature of 0.8 is highly recommended for best
+ performance.
+ """
+
+ tool_choice: str
+ """How the model chooses tools.
+
+ Options are `auto`, `none`, `required`, or specify a function.
+ """
+
+ tools: Iterable[Tool]
+ """Tools (functions) available to the model."""
+
+ turn_detection: TurnDetection
+ """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+ This can be set to `null` to turn off, in which case the client must manually
+ trigger model response. Server VAD means that the model will detect the start
+ and end of speech based on audio volume and respond at the end of user speech.
+ Semantic VAD is more advanced and uses a turn detection model (in conjuction
+ with VAD) to semantically estimate whether the user has finished speaking, then
+ dynamically sets a timeout based on this probability. For example, if user audio
+ trails off with "uhhm", the model will score a low probability of turn end and
+ wait longer for the user to continue speaking. This can be useful for more
+ natural conversations, but may have a higher latency.
+ """
+
+ voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+ """The voice the model uses to respond.
+
+ Voice cannot be changed during the session once the model has responded with
+ audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+ `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ """
+
+
+class InputAudioNoiseReduction(TypedDict, total=False):
+ type: Literal["near_field", "far_field"]
+ """Type of noise reduction.
+
+ `near_field` is for close-talking microphones such as headphones, `far_field` is
+ for far-field microphones such as laptop or conference room microphones.
+ """
+
+
+class InputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: str
+ """
+ The model to use for transcription; current options are `gpt-4o-transcribe`,
+ `gpt-4o-mini-transcribe`, and `whisper-1`.
+ """
+
+ prompt: str
+ """
+ An optional text to guide the model's style or continue a previous audio
+ segment. For `whisper-1`, the
+ [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ "expect words related to technology".
+ """
+
+
+class Tool(TypedDict, total=False):
+ description: str
+ """
+ The description of the function, including guidance on when and how to call it,
+ and guidance about what to tell the user when calling (if anything).
+ """
+
+ name: str
+ """The name of the function."""
+
+ parameters: object
+ """Parameters of the function in JSON Schema."""
+
+ type: Literal["function"]
+ """The type of the tool, i.e. `function`."""
+
+
+class TurnDetection(TypedDict, total=False):
+ create_response: bool
+ """
+ Whether or not to automatically generate a response when a VAD stop event
+ occurs.
+ """
+
+ eagerness: Literal["low", "medium", "high", "auto"]
+ """Used only for `semantic_vad` mode.
+
+ The eagerness of the model to respond. `low` will wait longer for the user to
+ continue speaking, `high` will respond more quickly. `auto` is the default and
+ is equivalent to `medium`.
+ """
+
+ interrupt_response: bool
+ """
+ Whether or not to automatically interrupt any ongoing response with output to
+ the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ occurs.
+ """
+
+ prefix_padding_ms: int
+ """Used only for `server_vad` mode.
+
+ Amount of audio to include before the VAD detected speech (in milliseconds).
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: int
+ """Used only for `server_vad` mode.
+
+ Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ With shorter values the model will respond more quickly, but may jump in on
+ short pauses from the user.
+ """
+
+ threshold: float
+ """Used only for `server_vad` mode.
+
+ Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+ threshold will require louder audio to activate the model, and thus might
+ perform better in noisy environments.
+ """
+
+ type: Literal["server_vad", "semantic_vad"]
+ """Type of turn detection."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py
new file mode 100644
index 00000000..c26e62be
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py
@@ -0,0 +1,150 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["SessionCreateResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"]
+
+
+class ClientSecret(BaseModel):
+ expires_at: int
+ """Timestamp for when the token expires.
+
+ Currently, all tokens expire after one minute.
+ """
+
+ value: str
+ """
+ Ephemeral key usable in client environments to authenticate connections to the
+ Realtime API. Use this in client-side environments rather than a standard API
+ token, which should only be used server-side.
+ """
+
+
+class InputAudioTranscription(BaseModel):
+ model: Optional[str] = None
+ """
+ The model to use for transcription; `whisper-1` is the only currently supported
+ model.
+ """
+
+
+class Tool(BaseModel):
+ description: Optional[str] = None
+ """
+ The description of the function, including guidance on when and how to call it,
+ and guidance about what to tell the user when calling (if anything).
+ """
+
+ name: Optional[str] = None
+ """The name of the function."""
+
+ parameters: Optional[object] = None
+ """Parameters of the function in JSON Schema."""
+
+ type: Optional[Literal["function"]] = None
+ """The type of the tool, i.e. `function`."""
+
+
+class TurnDetection(BaseModel):
+ prefix_padding_ms: Optional[int] = None
+ """Amount of audio to include before the VAD detected speech (in milliseconds).
+
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: Optional[int] = None
+ """Duration of silence to detect speech stop (in milliseconds).
+
+ Defaults to 500ms. With shorter values the model will respond more quickly, but
+ may jump in on short pauses from the user.
+ """
+
+ threshold: Optional[float] = None
+ """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
+
+ A higher threshold will require louder audio to activate the model, and thus
+ might perform better in noisy environments.
+ """
+
+ type: Optional[str] = None
+ """Type of turn detection, only `server_vad` is currently supported."""
+
+
+class SessionCreateResponse(BaseModel):
+ client_secret: ClientSecret
+ """Ephemeral key returned by the API."""
+
+ input_audio_format: Optional[str] = None
+ """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+ input_audio_transcription: Optional[InputAudioTranscription] = None
+ """
+ Configuration for input audio transcription, defaults to off and can be set to
+ `null` to turn off once enabled. Input audio transcription is not native to the
+ model, since the model consumes audio directly. Transcription runs
+ asynchronously through Whisper and should be treated as rough guidance rather
+ than the representation understood by the model.
+ """
+
+ instructions: Optional[str] = None
+ """The default system instructions (i.e.
+
+ system message) prepended to model calls. This field allows the client to guide
+ the model on desired responses. The model can be instructed on response content
+ and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+ good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ into your voice", "laugh frequently"). The instructions are not guaranteed to be
+ followed by the model, but they provide guidance to the model on the desired
+ behavior.
+
+ Note that the server sets default instructions which will be used if this field
+ is not set and are visible in the `session.created` event at the start of the
+ session.
+ """
+
+ max_response_output_tokens: Union[int, Literal["inf"], None] = None
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ output_audio_format: Optional[str] = None
+ """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+ temperature: Optional[float] = None
+ """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+ tool_choice: Optional[str] = None
+ """How the model chooses tools.
+
+ Options are `auto`, `none`, `required`, or specify a function.
+ """
+
+ tools: Optional[List[Tool]] = None
+ """Tools (functions) available to the model."""
+
+ turn_detection: Optional[TurnDetection] = None
+ """Configuration for turn detection.
+
+ Can be set to `null` to turn off. Server VAD means that the model will detect
+ the start and end of speech based on audio volume and respond at the end of user
+ speech.
+ """
+
+ voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+ """The voice the model uses to respond.
+
+ Voice cannot be changed during the session once the model has responded with
+ audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+ `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py
new file mode 100644
index 00000000..baf6af38
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .session import Session
+from ...._models import BaseModel
+
+__all__ = ["SessionCreatedEvent"]
+
+
+class SessionCreatedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ session: Session
+ """Realtime session object configuration."""
+
+ type: Literal["session.created"]
+ """The event type, must be `session.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py
new file mode 100644
index 00000000..00180f59
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py
@@ -0,0 +1,242 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = [
+ "SessionUpdateEvent",
+ "Session",
+ "SessionInputAudioNoiseReduction",
+ "SessionInputAudioTranscription",
+ "SessionTool",
+ "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(BaseModel):
+ type: Optional[Literal["near_field", "far_field"]] = None
+ """Type of noise reduction.
+
+ `near_field` is for close-talking microphones such as headphones, `far_field` is
+ for far-field microphones such as laptop or conference room microphones.
+ """
+
+
+class SessionInputAudioTranscription(BaseModel):
+ language: Optional[str] = None
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: Optional[str] = None
+ """
+ The model to use for transcription; current options are `gpt-4o-transcribe`,
+ `gpt-4o-mini-transcribe`, and `whisper-1`.
+ """
+
+ prompt: Optional[str] = None
+ """
+ An optional text to guide the model's style or continue a previous audio
+ segment. For `whisper-1`, the
+ [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ "expect words related to technology".
+ """
+
+
+class SessionTool(BaseModel):
+ description: Optional[str] = None
+ """
+ The description of the function, including guidance on when and how to call it,
+ and guidance about what to tell the user when calling (if anything).
+ """
+
+ name: Optional[str] = None
+ """The name of the function."""
+
+ parameters: Optional[object] = None
+ """Parameters of the function in JSON Schema."""
+
+ type: Optional[Literal["function"]] = None
+ """The type of the tool, i.e. `function`."""
+
+
+class SessionTurnDetection(BaseModel):
+ create_response: Optional[bool] = None
+ """
+ Whether or not to automatically generate a response when a VAD stop event
+ occurs.
+ """
+
+ eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
+ """Used only for `semantic_vad` mode.
+
+ The eagerness of the model to respond. `low` will wait longer for the user to
+ continue speaking, `high` will respond more quickly. `auto` is the default and
+ is equivalent to `medium`.
+ """
+
+ interrupt_response: Optional[bool] = None
+ """
+ Whether or not to automatically interrupt any ongoing response with output to
+ the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ occurs.
+ """
+
+ prefix_padding_ms: Optional[int] = None
+ """Used only for `server_vad` mode.
+
+ Amount of audio to include before the VAD detected speech (in milliseconds).
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: Optional[int] = None
+ """Used only for `server_vad` mode.
+
+ Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ With shorter values the model will respond more quickly, but may jump in on
+ short pauses from the user.
+ """
+
+ threshold: Optional[float] = None
+ """Used only for `server_vad` mode.
+
+ Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+ threshold will require louder audio to activate the model, and thus might
+ perform better in noisy environments.
+ """
+
+ type: Optional[Literal["server_vad", "semantic_vad"]] = None
+ """Type of turn detection."""
+
+
+class Session(BaseModel):
+ input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of input audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+ be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+ byte order.
+ """
+
+ input_audio_noise_reduction: Optional[SessionInputAudioNoiseReduction] = None
+ """Configuration for input audio noise reduction.
+
+ This can be set to `null` to turn off. Noise reduction filters audio added to
+ the input audio buffer before it is sent to VAD and the model. Filtering the
+ audio can improve VAD and turn detection accuracy (reducing false positives) and
+ model performance by improving perception of the input audio.
+ """
+
+ input_audio_transcription: Optional[SessionInputAudioTranscription] = None
+ """
+ Configuration for input audio transcription, defaults to off and can be set to
+ `null` to turn off once enabled. Input audio transcription is not native to the
+ model, since the model consumes audio directly. Transcription runs
+ asynchronously through
+ [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as guidance of input audio content rather than precisely
+ what the model heard. The client can optionally set the language and prompt for
+ transcription; these offer additional guidance to the transcription service.
+ """
+
+ instructions: Optional[str] = None
+ """The default system instructions (i.e.
+
+ system message) prepended to model calls. This field allows the client to guide
+ the model on desired responses. The model can be instructed on response content
+ and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+ good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ into your voice", "laugh frequently"). The instructions are not guaranteed to be
+ followed by the model, but they provide guidance to the model on the desired
+ behavior.
+
+ Note that the server sets default instructions which will be used if this field
+ is not set and are visible in the `session.created` event at the start of the
+ session.
+ """
+
+ max_response_output_tokens: Union[int, Literal["inf"], None] = None
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ model: Optional[
+ Literal[
+ "gpt-4o-realtime-preview",
+ "gpt-4o-realtime-preview-2024-10-01",
+ "gpt-4o-realtime-preview-2024-12-17",
+ "gpt-4o-mini-realtime-preview",
+ "gpt-4o-mini-realtime-preview-2024-12-17",
+ ]
+ ] = None
+ """The Realtime model used for this session."""
+
+ output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of output audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+ sampled at a rate of 24kHz.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature for the model, limited to [0.6, 1.2].
+
+ For audio models a temperature of 0.8 is highly recommended for best
+ performance.
+ """
+
+ tool_choice: Optional[str] = None
+ """How the model chooses tools.
+
+ Options are `auto`, `none`, `required`, or specify a function.
+ """
+
+ tools: Optional[List[SessionTool]] = None
+ """Tools (functions) available to the model."""
+
+ turn_detection: Optional[SessionTurnDetection] = None
+ """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+ This can be set to `null` to turn off, in which case the client must manually
+ trigger model response. Server VAD means that the model will detect the start
+ and end of speech based on audio volume and respond at the end of user speech.
+ Semantic VAD is more advanced and uses a turn detection model (in conjuction
+ with VAD) to semantically estimate whether the user has finished speaking, then
+ dynamically sets a timeout based on this probability. For example, if user audio
+ trails off with "uhhm", the model will score a low probability of turn end and
+ wait longer for the user to continue speaking. This can be useful for more
+ natural conversations, but may have a higher latency.
+ """
+
+ voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+ """The voice the model uses to respond.
+
+ Voice cannot be changed during the session once the model has responded with
+ audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+ `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ """
+
+
+class SessionUpdateEvent(BaseModel):
+ session: Session
+ """Realtime session object configuration."""
+
+ type: Literal["session.update"]
+ """The event type, must be `session.update`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py
new file mode 100644
index 00000000..b8bce8fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py
@@ -0,0 +1,240 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = [
+ "SessionUpdateEventParam",
+ "Session",
+ "SessionInputAudioNoiseReduction",
+ "SessionInputAudioTranscription",
+ "SessionTool",
+ "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(TypedDict, total=False):
+ type: Literal["near_field", "far_field"]
+ """Type of noise reduction.
+
+ `near_field` is for close-talking microphones such as headphones, `far_field` is
+ for far-field microphones such as laptop or conference room microphones.
+ """
+
+
+class SessionInputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: str
+ """
+ The model to use for transcription; current options are `gpt-4o-transcribe`,
+ `gpt-4o-mini-transcribe`, and `whisper-1`.
+ """
+
+ prompt: str
+ """
+ An optional text to guide the model's style or continue a previous audio
+ segment. For `whisper-1`, the
+ [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ "expect words related to technology".
+ """
+
+
+class SessionTool(TypedDict, total=False):
+ description: str
+ """
+ The description of the function, including guidance on when and how to call it,
+ and guidance about what to tell the user when calling (if anything).
+ """
+
+ name: str
+ """The name of the function."""
+
+ parameters: object
+ """Parameters of the function in JSON Schema."""
+
+ type: Literal["function"]
+ """The type of the tool, i.e. `function`."""
+
+
+class SessionTurnDetection(TypedDict, total=False):
+ create_response: bool
+ """
+ Whether or not to automatically generate a response when a VAD stop event
+ occurs.
+ """
+
+ eagerness: Literal["low", "medium", "high", "auto"]
+ """Used only for `semantic_vad` mode.
+
+ The eagerness of the model to respond. `low` will wait longer for the user to
+ continue speaking, `high` will respond more quickly. `auto` is the default and
+ is equivalent to `medium`.
+ """
+
+ interrupt_response: bool
+ """
+ Whether or not to automatically interrupt any ongoing response with output to
+ the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ occurs.
+ """
+
+ prefix_padding_ms: int
+ """Used only for `server_vad` mode.
+
+ Amount of audio to include before the VAD detected speech (in milliseconds).
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: int
+ """Used only for `server_vad` mode.
+
+ Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ With shorter values the model will respond more quickly, but may jump in on
+ short pauses from the user.
+ """
+
+ threshold: float
+ """Used only for `server_vad` mode.
+
+ Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+ threshold will require louder audio to activate the model, and thus might
+ perform better in noisy environments.
+ """
+
+ type: Literal["server_vad", "semantic_vad"]
+ """Type of turn detection."""
+
+
+class Session(TypedDict, total=False):
+ input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+ """The format of input audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+ be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+ byte order.
+ """
+
+ input_audio_noise_reduction: SessionInputAudioNoiseReduction
+ """Configuration for input audio noise reduction.
+
+ This can be set to `null` to turn off. Noise reduction filters audio added to
+ the input audio buffer before it is sent to VAD and the model. Filtering the
+ audio can improve VAD and turn detection accuracy (reducing false positives) and
+ model performance by improving perception of the input audio.
+ """
+
+ input_audio_transcription: SessionInputAudioTranscription
+ """
+ Configuration for input audio transcription, defaults to off and can be set to
+ `null` to turn off once enabled. Input audio transcription is not native to the
+ model, since the model consumes audio directly. Transcription runs
+ asynchronously through
+ [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as guidance of input audio content rather than precisely
+ what the model heard. The client can optionally set the language and prompt for
+ transcription; these offer additional guidance to the transcription service.
+ """
+
+ instructions: str
+ """The default system instructions (i.e.
+
+ system message) prepended to model calls. This field allows the client to guide
+ the model on desired responses. The model can be instructed on response content
+ and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+ good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ into your voice", "laugh frequently"). The instructions are not guaranteed to be
+ followed by the model, but they provide guidance to the model on the desired
+ behavior.
+
+ Note that the server sets default instructions which will be used if this field
+ is not set and are visible in the `session.created` event at the start of the
+ session.
+ """
+
+ max_response_output_tokens: Union[int, Literal["inf"]]
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ """
+
+ modalities: List[Literal["text", "audio"]]
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ model: Literal[
+ "gpt-4o-realtime-preview",
+ "gpt-4o-realtime-preview-2024-10-01",
+ "gpt-4o-realtime-preview-2024-12-17",
+ "gpt-4o-mini-realtime-preview",
+ "gpt-4o-mini-realtime-preview-2024-12-17",
+ ]
+ """The Realtime model used for this session."""
+
+ output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+ """The format of output audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+ sampled at a rate of 24kHz.
+ """
+
+ temperature: float
+ """Sampling temperature for the model, limited to [0.6, 1.2].
+
+ For audio models a temperature of 0.8 is highly recommended for best
+ performance.
+ """
+
+ tool_choice: str
+ """How the model chooses tools.
+
+ Options are `auto`, `none`, `required`, or specify a function.
+ """
+
+ tools: Iterable[SessionTool]
+ """Tools (functions) available to the model."""
+
+ turn_detection: SessionTurnDetection
+ """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+ This can be set to `null` to turn off, in which case the client must manually
+ trigger model response. Server VAD means that the model will detect the start
+ and end of speech based on audio volume and respond at the end of user speech.
+ Semantic VAD is more advanced and uses a turn detection model (in conjuction
+ with VAD) to semantically estimate whether the user has finished speaking, then
+ dynamically sets a timeout based on this probability. For example, if user audio
+ trails off with "uhhm", the model will score a low probability of turn end and
+ wait longer for the user to continue speaking. This can be useful for more
+ natural conversations, but may have a higher latency.
+ """
+
+ voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+ """The voice the model uses to respond.
+
+ Voice cannot be changed during the session once the model has responded with
+ audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+ `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ """
+
+
+class SessionUpdateEventParam(TypedDict, total=False):
+ session: Required[Session]
+ """Realtime session object configuration."""
+
+ type: Required[Literal["session.update"]]
+ """The event type, must be `session.update`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py
new file mode 100644
index 00000000..b9b6488e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .session import Session
+from ...._models import BaseModel
+
+__all__ = ["SessionUpdatedEvent"]
+
+
+class SessionUpdatedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ session: Session
+ """Realtime session object configuration."""
+
+ type: Literal["session.updated"]
+ """The event type, must be `session.updated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py
new file mode 100644
index 00000000..7c7abf37
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py
@@ -0,0 +1,100 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["TranscriptionSession", "ClientSecret", "InputAudioTranscription", "TurnDetection"]
+
+
+class ClientSecret(BaseModel):
+ expires_at: int
+ """Timestamp for when the token expires.
+
+ Currently, all tokens expire after one minute.
+ """
+
+ value: str
+ """
+ Ephemeral key usable in client environments to authenticate connections to the
+ Realtime API. Use this in client-side environments rather than a standard API
+ token, which should only be used server-side.
+ """
+
+
+class InputAudioTranscription(BaseModel):
+ language: Optional[str] = None
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None
+ """The model to use for transcription.
+
+ Can be `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, or `whisper-1`.
+ """
+
+ prompt: Optional[str] = None
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
+
+class TurnDetection(BaseModel):
+ prefix_padding_ms: Optional[int] = None
+ """Amount of audio to include before the VAD detected speech (in milliseconds).
+
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: Optional[int] = None
+ """Duration of silence to detect speech stop (in milliseconds).
+
+ Defaults to 500ms. With shorter values the model will respond more quickly, but
+ may jump in on short pauses from the user.
+ """
+
+ threshold: Optional[float] = None
+ """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
+
+ A higher threshold will require louder audio to activate the model, and thus
+ might perform better in noisy environments.
+ """
+
+ type: Optional[str] = None
+ """Type of turn detection, only `server_vad` is currently supported."""
+
+
+class TranscriptionSession(BaseModel):
+ client_secret: ClientSecret
+ """Ephemeral key returned by the API.
+
+ Only present when the session is created on the server via REST API.
+ """
+
+ input_audio_format: Optional[str] = None
+ """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+ input_audio_transcription: Optional[InputAudioTranscription] = None
+ """Configuration of the transcription model."""
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ turn_detection: Optional[TurnDetection] = None
+ """Configuration for turn detection.
+
+ Can be set to `null` to turn off. Server VAD means that the model will detect
+ the start and end of speech based on audio volume and respond at the end of user
+ speech.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py
new file mode 100644
index 00000000..4066dc4c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py
@@ -0,0 +1,143 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["TranscriptionSessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "TurnDetection"]
+
+
+class TranscriptionSessionCreateParams(TypedDict, total=False):
+ include: List[str]
+ """The set of items to include in the transcription. Current available items are:
+
+ - `item.input_audio_transcription.logprobs`
+ """
+
+ input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+ """The format of input audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+ be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+ byte order.
+ """
+
+ input_audio_noise_reduction: InputAudioNoiseReduction
+ """Configuration for input audio noise reduction.
+
+ This can be set to `null` to turn off. Noise reduction filters audio added to
+ the input audio buffer before it is sent to VAD and the model. Filtering the
+ audio can improve VAD and turn detection accuracy (reducing false positives) and
+ model performance by improving perception of the input audio.
+ """
+
+ input_audio_transcription: InputAudioTranscription
+ """Configuration for input audio transcription.
+
+ The client can optionally set the language and prompt for transcription; these
+ offer additional guidance to the transcription service.
+ """
+
+ modalities: List[Literal["text", "audio"]]
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ turn_detection: TurnDetection
+ """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+ This can be set to `null` to turn off, in which case the client must manually
+ trigger model response. Server VAD means that the model will detect the start
+ and end of speech based on audio volume and respond at the end of user speech.
+ Semantic VAD is more advanced and uses a turn detection model (in conjuction
+ with VAD) to semantically estimate whether the user has finished speaking, then
+ dynamically sets a timeout based on this probability. For example, if user audio
+ trails off with "uhhm", the model will score a low probability of turn end and
+ wait longer for the user to continue speaking. This can be useful for more
+ natural conversations, but may have a higher latency.
+ """
+
+
+class InputAudioNoiseReduction(TypedDict, total=False):
+ type: Literal["near_field", "far_field"]
+ """Type of noise reduction.
+
+ `near_field` is for close-talking microphones such as headphones, `far_field` is
+ for far-field microphones such as laptop or conference room microphones.
+ """
+
+
+class InputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]
+ """
+ The model to use for transcription; current options are `gpt-4o-transcribe`,
+ `gpt-4o-mini-transcribe`, and `whisper-1`.
+ """
+
+ prompt: str
+ """
+ An optional text to guide the model's style or continue a previous audio
+ segment. For `whisper-1`, the
+ [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ "expect words related to technology".
+ """
+
+
+class TurnDetection(TypedDict, total=False):
+ create_response: bool
+ """
+ Whether or not to automatically generate a response when a VAD stop event
+ occurs.
+ """
+
+ eagerness: Literal["low", "medium", "high", "auto"]
+ """Used only for `semantic_vad` mode.
+
+ The eagerness of the model to respond. `low` will wait longer for the user to
+ continue speaking, `high` will respond more quickly. `auto` is the default and
+ is equivalent to `medium`.
+ """
+
+ interrupt_response: bool
+ """
+ Whether or not to automatically interrupt any ongoing response with output to
+ the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ occurs.
+ """
+
+ prefix_padding_ms: int
+ """Used only for `server_vad` mode.
+
+ Amount of audio to include before the VAD detected speech (in milliseconds).
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: int
+ """Used only for `server_vad` mode.
+
+ Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ With shorter values the model will respond more quickly, but may jump in on
+ short pauses from the user.
+ """
+
+ threshold: float
+ """Used only for `server_vad` mode.
+
+ Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+ threshold will require louder audio to activate the model, and thus might
+ perform better in noisy environments.
+ """
+
+ type: Literal["server_vad", "semantic_vad"]
+ """Type of turn detection."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py
new file mode 100644
index 00000000..043ac02e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py
@@ -0,0 +1,160 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = [
+ "TranscriptionSessionUpdate",
+ "Session",
+ "SessionInputAudioNoiseReduction",
+ "SessionInputAudioTranscription",
+ "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(BaseModel):
+ type: Optional[Literal["near_field", "far_field"]] = None
+ """Type of noise reduction.
+
+ `near_field` is for close-talking microphones such as headphones, `far_field` is
+ for far-field microphones such as laptop or conference room microphones.
+ """
+
+
+class SessionInputAudioTranscription(BaseModel):
+ language: Optional[str] = None
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None
+ """
+ The model to use for transcription; current options are `gpt-4o-transcribe`,
+ `gpt-4o-mini-transcribe`, and `whisper-1`.
+ """
+
+ prompt: Optional[str] = None
+ """
+ An optional text to guide the model's style or continue a previous audio
+ segment. For `whisper-1`, the
+ [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ "expect words related to technology".
+ """
+
+
+class SessionTurnDetection(BaseModel):
+ create_response: Optional[bool] = None
+ """
+ Whether or not to automatically generate a response when a VAD stop event
+ occurs.
+ """
+
+ eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
+ """Used only for `semantic_vad` mode.
+
+ The eagerness of the model to respond. `low` will wait longer for the user to
+ continue speaking, `high` will respond more quickly. `auto` is the default and
+ is equivalent to `medium`.
+ """
+
+ interrupt_response: Optional[bool] = None
+ """
+ Whether or not to automatically interrupt any ongoing response with output to
+ the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ occurs.
+ """
+
+ prefix_padding_ms: Optional[int] = None
+ """Used only for `server_vad` mode.
+
+ Amount of audio to include before the VAD detected speech (in milliseconds).
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: Optional[int] = None
+ """Used only for `server_vad` mode.
+
+ Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ With shorter values the model will respond more quickly, but may jump in on
+ short pauses from the user.
+ """
+
+ threshold: Optional[float] = None
+ """Used only for `server_vad` mode.
+
+ Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+ threshold will require louder audio to activate the model, and thus might
+ perform better in noisy environments.
+ """
+
+ type: Optional[Literal["server_vad", "semantic_vad"]] = None
+ """Type of turn detection."""
+
+
+class Session(BaseModel):
+ include: Optional[List[str]] = None
+ """The set of items to include in the transcription. Current available items are:
+
+ - `item.input_audio_transcription.logprobs`
+ """
+
+ input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of input audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+ be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+ byte order.
+ """
+
+ input_audio_noise_reduction: Optional[SessionInputAudioNoiseReduction] = None
+ """Configuration for input audio noise reduction.
+
+ This can be set to `null` to turn off. Noise reduction filters audio added to
+ the input audio buffer before it is sent to VAD and the model. Filtering the
+ audio can improve VAD and turn detection accuracy (reducing false positives) and
+ model performance by improving perception of the input audio.
+ """
+
+ input_audio_transcription: Optional[SessionInputAudioTranscription] = None
+ """Configuration for input audio transcription.
+
+ The client can optionally set the language and prompt for transcription; these
+ offer additional guidance to the transcription service.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ turn_detection: Optional[SessionTurnDetection] = None
+ """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+ This can be set to `null` to turn off, in which case the client must manually
+ trigger model response. Server VAD means that the model will detect the start
+ and end of speech based on audio volume and respond at the end of user speech.
+ Semantic VAD is more advanced and uses a turn detection model (in conjuction
+ with VAD) to semantically estimate whether the user has finished speaking, then
+ dynamically sets a timeout based on this probability. For example, if user audio
+ trails off with "uhhm", the model will score a low probability of turn end and
+ wait longer for the user to continue speaking. This can be useful for more
+ natural conversations, but may have a higher latency.
+ """
+
+
+class TranscriptionSessionUpdate(BaseModel):
+ session: Session
+ """Realtime transcription session object configuration."""
+
+ type: Literal["transcription_session.update"]
+ """The event type, must be `transcription_session.update`."""
+
+ event_id: Optional[str] = None
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py
new file mode 100644
index 00000000..997a36d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py
@@ -0,0 +1,160 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = [
+ "TranscriptionSessionUpdateParam",
+ "Session",
+ "SessionInputAudioNoiseReduction",
+ "SessionInputAudioTranscription",
+ "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(TypedDict, total=False):
+ type: Literal["near_field", "far_field"]
+ """Type of noise reduction.
+
+ `near_field` is for close-talking microphones such as headphones, `far_field` is
+ for far-field microphones such as laptop or conference room microphones.
+ """
+
+
+class SessionInputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
+ model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]
+ """
+ The model to use for transcription; current options are `gpt-4o-transcribe`,
+ `gpt-4o-mini-transcribe`, and `whisper-1`.
+ """
+
+ prompt: str
+ """
+ An optional text to guide the model's style or continue a previous audio
+ segment. For `whisper-1`, the
+ [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ "expect words related to technology".
+ """
+
+
+class SessionTurnDetection(TypedDict, total=False):
+ create_response: bool
+ """
+ Whether or not to automatically generate a response when a VAD stop event
+ occurs.
+ """
+
+ eagerness: Literal["low", "medium", "high", "auto"]
+ """Used only for `semantic_vad` mode.
+
+ The eagerness of the model to respond. `low` will wait longer for the user to
+ continue speaking, `high` will respond more quickly. `auto` is the default and
+ is equivalent to `medium`.
+ """
+
+ interrupt_response: bool
+ """
+ Whether or not to automatically interrupt any ongoing response with output to
+ the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ occurs.
+ """
+
+ prefix_padding_ms: int
+ """Used only for `server_vad` mode.
+
+ Amount of audio to include before the VAD detected speech (in milliseconds).
+ Defaults to 300ms.
+ """
+
+ silence_duration_ms: int
+ """Used only for `server_vad` mode.
+
+ Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ With shorter values the model will respond more quickly, but may jump in on
+ short pauses from the user.
+ """
+
+ threshold: float
+ """Used only for `server_vad` mode.
+
+ Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+ threshold will require louder audio to activate the model, and thus might
+ perform better in noisy environments.
+ """
+
+ type: Literal["server_vad", "semantic_vad"]
+ """Type of turn detection."""
+
+
+class Session(TypedDict, total=False):
+ include: List[str]
+ """The set of items to include in the transcription. Current available items are:
+
+ - `item.input_audio_transcription.logprobs`
+ """
+
+ input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+ """The format of input audio.
+
+ Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+ be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+ byte order.
+ """
+
+ input_audio_noise_reduction: SessionInputAudioNoiseReduction
+ """Configuration for input audio noise reduction.
+
+ This can be set to `null` to turn off. Noise reduction filters audio added to
+ the input audio buffer before it is sent to VAD and the model. Filtering the
+ audio can improve VAD and turn detection accuracy (reducing false positives) and
+ model performance by improving perception of the input audio.
+ """
+
+ input_audio_transcription: SessionInputAudioTranscription
+ """Configuration for input audio transcription.
+
+ The client can optionally set the language and prompt for transcription; these
+ offer additional guidance to the transcription service.
+ """
+
+ modalities: List[Literal["text", "audio"]]
+ """The set of modalities the model can respond with.
+
+ To disable audio, set this to ["text"].
+ """
+
+ turn_detection: SessionTurnDetection
+ """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+ This can be set to `null` to turn off, in which case the client must manually
+ trigger model response. Server VAD means that the model will detect the start
+ and end of speech based on audio volume and respond at the end of user speech.
+ Semantic VAD is more advanced and uses a turn detection model (in conjuction
+ with VAD) to semantically estimate whether the user has finished speaking, then
+ dynamically sets a timeout based on this probability. For example, if user audio
+ trails off with "uhhm", the model will score a low probability of turn end and
+ wait longer for the user to continue speaking. This can be useful for more
+ natural conversations, but may have a higher latency.
+ """
+
+
+class TranscriptionSessionUpdateParam(TypedDict, total=False):
+ session: Required[Session]
+ """Realtime transcription session object configuration."""
+
+ type: Required[Literal["transcription_session.update"]]
+ """The event type, must be `transcription_session.update`."""
+
+ event_id: str
+ """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py
new file mode 100644
index 00000000..ffc100bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .transcription_session import TranscriptionSession
+
+__all__ = ["TranscriptionSessionUpdatedEvent"]
+
+
+class TranscriptionSessionUpdatedEvent(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ session: TranscriptionSession
+ """A new Realtime transcription session configuration.
+
+ When a session is created on the server via REST API, the session object also
+ contains an ephemeral key. Default TTL for keys is one minute. This property is
+ not present when a session is updated via the WebSocket API.
+ """
+
+ type: Literal["transcription_session.updated"]
+ """The event type, must be `transcription_session.updated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread.py
new file mode 100644
index 00000000..789f66e4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread.py
@@ -0,0 +1,63 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.metadata import Metadata
+
+__all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ToolResourcesCodeInterpreter(BaseModel):
+ file_ids: Optional[List[str]] = None
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(BaseModel):
+ vector_store_ids: Optional[List[str]] = None
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+
+class ToolResources(BaseModel):
+ code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+
+ file_search: Optional[ToolResourcesFileSearch] = None
+
+
+class Thread(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the thread was created."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ object: Literal["thread"]
+ """The object type, which is always `thread`."""
+
+ tool_resources: Optional[ToolResources] = None
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py
new file mode 100644
index 00000000..065c390f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py
@@ -0,0 +1,401 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared.chat_model import ChatModel
+from .function_tool_param import FunctionToolParam
+from .file_search_tool_param import FileSearchToolParam
+from ..shared_params.metadata import Metadata
+from .code_interpreter_tool_param import CodeInterpreterToolParam
+from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
+from .threads.message_content_part_param import MessageContentPartParam
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = [
+ "ThreadCreateAndRunParamsBase",
+ "Thread",
+ "ThreadMessage",
+ "ThreadMessageAttachment",
+ "ThreadMessageAttachmentTool",
+ "ThreadMessageAttachmentToolFileSearch",
+ "ThreadToolResources",
+ "ThreadToolResourcesCodeInterpreter",
+ "ThreadToolResourcesFileSearch",
+ "ThreadToolResourcesFileSearchVectorStore",
+ "ThreadToolResourcesFileSearchVectorStoreChunkingStrategy",
+ "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+ "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+ "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
+ "ToolResources",
+ "ToolResourcesCodeInterpreter",
+ "ToolResourcesFileSearch",
+ "Tool",
+ "TruncationStrategy",
+ "ThreadCreateAndRunParamsNonStreaming",
+ "ThreadCreateAndRunParamsStreaming",
+]
+
+
+class ThreadCreateAndRunParamsBase(TypedDict, total=False):
+ assistant_id: Required[str]
+ """
+ The ID of the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ execute this run.
+ """
+
+ instructions: Optional[str]
+ """Override the default system message of the assistant.
+
+ This is useful for modifying the behavior on a per-run basis.
+ """
+
+ max_completion_tokens: Optional[int]
+ """
+ The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run. If the run exceeds the number of
+ completion tokens specified, the run will end with status `incomplete`. See
+ `incomplete_details` for more info.
+ """
+
+ max_prompt_tokens: Optional[int]
+ """The maximum number of prompt tokens that may be used over the course of the run.
+
+ The run will make a best effort to use only the number of prompt tokens
+ specified, across multiple turns of the run. If the run exceeds the number of
+ prompt tokens specified, the run will end with status `incomplete`. See
+ `incomplete_details` for more info.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: Union[str, ChatModel, None]
+ """
+ The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ be used to execute this run. If a value is provided here, it will override the
+ model associated with the assistant. If not, the model associated with the
+ assistant will be used.
+ """
+
+ parallel_tool_calls: bool
+ """
+ Whether to enable
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ during tool use.
+ """
+
+ response_format: Optional[AssistantResponseFormatOptionParam]
+ """Specifies the format that the model must output.
+
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs, which ensures the model will match your supplied JSON schema. Learn more
+ in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+ """
+
+ thread: Thread
+ """Options to create a new thread.
+
+ If no thread is provided when running a request, an empty thread will be
+ created.
+ """
+
+ tool_choice: Optional[AssistantToolChoiceOptionParam]
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tools and instead generates a message. `auto` is the default value
+ and means the model can pick between generating a message or calling one or more
+ tools. `required` means the model must call one or more tools before responding
+ to the user. Specifying a particular tool like `{"type": "file_search"}` or
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+ """
+
+ tool_resources: Optional[ToolResources]
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+ tools: Optional[Iterable[Tool]]
+ """Override the tools the assistant can use for this run.
+
+ This is useful for modifying the behavior on a per-run basis.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
+ """
+
+ truncation_strategy: Optional[TruncationStrategy]
+ """Controls for how a thread will be truncated prior to the run.
+
+ Use this to control the initial context window of the run.
+ """
+
+
+class ThreadMessageAttachmentToolFileSearch(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """The type of tool being defined: `file_search`"""
+
+
+ThreadMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, ThreadMessageAttachmentToolFileSearch]
+
+
+class ThreadMessageAttachment(TypedDict, total=False):
+ file_id: str
+ """The ID of the file to attach to the message."""
+
+ tools: Iterable[ThreadMessageAttachmentTool]
+ """The tools to add this file to."""
+
+
+class ThreadMessage(TypedDict, total=False):
+ content: Required[Union[str, Iterable[MessageContentPartParam]]]
+ """The text contents of the message."""
+
+ role: Required[Literal["user", "assistant"]]
+ """The role of the entity that is creating the message. Allowed values include:
+
+ - `user`: Indicates the message is sent by an actual user and should be used in
+ most cases to represent user-generated messages.
+ - `assistant`: Indicates the message is generated by the assistant. Use this
+ value to insert messages from the assistant into the conversation.
+ """
+
+ attachments: Optional[Iterable[ThreadMessageAttachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
+class ThreadToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+ type: Required[Literal["auto"]]
+ """Always `auto`."""
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+ chunk_overlap_tokens: Required[int]
+ """The number of tokens that overlap between chunks. The default value is `400`.
+
+ Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+ """
+
+ max_chunk_size_tokens: Required[int]
+ """The maximum number of tokens in each chunk.
+
+ The default value is `800`. The minimum value is `100` and the maximum value is
+ `4096`.
+ """
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+ static: Required[ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+ type: Required[Literal["static"]]
+ """Always `static`."""
+
+
+ThreadToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+ ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto,
+ ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic,
+]
+
+
+class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
+ chunking_strategy: ThreadToolResourcesFileSearchVectorStoreChunkingStrategy
+ """The chunking strategy used to chunk the file(s).
+
+ If not set, will use the `auto` strategy.
+ """
+
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ add to the vector store. There can be a maximum of 10000 files in a vector
+ store.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
+class ThreadToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+ vector_stores: Iterable[ThreadToolResourcesFileSearchVectorStore]
+ """
+ A helper to create a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ store attached to the thread.
+ """
+
+
+class ThreadToolResources(TypedDict, total=False):
+ code_interpreter: ThreadToolResourcesCodeInterpreter
+
+ file_search: ThreadToolResourcesFileSearch
+
+
+class Thread(TypedDict, total=False):
+ messages: Iterable[ThreadMessage]
+ """
+ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+ start the thread with.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ tool_resources: Optional[ThreadToolResources]
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
+
+
+Tool: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
+
+
+class TruncationStrategy(TypedDict, total=False):
+ type: Required[Literal["auto", "last_messages"]]
+ """The truncation strategy to use for the thread.
+
+ The default is `auto`. If set to `last_messages`, the thread will be truncated
+ to the n most recent messages in the thread. When set to `auto`, messages in the
+ middle of the thread will be dropped to fit the context length of the model,
+ `max_prompt_tokens`.
+ """
+
+ last_messages: Optional[int]
+ """
+ The number of most recent messages from the thread to include when constructing
+ the context for the run.
+ """
+
+
+class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If `true`, returns a stream of events that happen during the Run as server-sent
+ events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ message.
+ """
+
+
+class ThreadCreateAndRunParamsStreaming(ThreadCreateAndRunParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If `true`, returns a stream of events that happen during the Run as server-sent
+ events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ message.
+ """
+
+
+ThreadCreateAndRunParams = Union[ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming]
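
As a usage sketch: the TypedDicts above surface as keyword arguments to `client.beta.threads.create_and_run`. The assistant ID is a placeholder, and note the docstring's caveat that JSON mode also requires instructing the model to emit JSON in a message.

    from openai import OpenAI

    client = OpenAI()

    run = client.beta.threads.create_and_run(
        assistant_id="asst_abc123",  # placeholder assistant ID
        thread={
            "messages": [
                {
                    "role": "user",
                    "content": "Reply in JSON with keys 'title' and 'summary'.",
                }
            ]
        },
        response_format={"type": "json_object"},  # JSON mode, per the caveat above
        truncation_strategy={"type": "last_messages", "last_messages": 10},
        max_completion_tokens=1024,
    )
    print(run.id, run.status)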
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py
new file mode 100644
index 00000000..ec1ccf19
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py
@@ -0,0 +1,185 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared_params.metadata import Metadata
+from .code_interpreter_tool_param import CodeInterpreterToolParam
+from .threads.message_content_part_param import MessageContentPartParam
+
+__all__ = [
+ "ThreadCreateParams",
+ "Message",
+ "MessageAttachment",
+ "MessageAttachmentTool",
+ "MessageAttachmentToolFileSearch",
+ "ToolResources",
+ "ToolResourcesCodeInterpreter",
+ "ToolResourcesFileSearch",
+ "ToolResourcesFileSearchVectorStore",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategy",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+ "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
+]
+
+
+class ThreadCreateParams(TypedDict, total=False):
+ messages: Iterable[Message]
+ """
+ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+ start the thread with.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ tool_resources: Optional[ToolResources]
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+
+class MessageAttachmentToolFileSearch(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """The type of tool being defined: `file_search`"""
+
+
+MessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, MessageAttachmentToolFileSearch]
+
+
+class MessageAttachment(TypedDict, total=False):
+ file_id: str
+ """The ID of the file to attach to the message."""
+
+ tools: Iterable[MessageAttachmentTool]
+ """The tools to add this file to."""
+
+
+class Message(TypedDict, total=False):
+ content: Required[Union[str, Iterable[MessageContentPartParam]]]
+ """The text contents of the message."""
+
+ role: Required[Literal["user", "assistant"]]
+ """The role of the entity that is creating the message. Allowed values include:
+
+ - `user`: Indicates the message is sent by an actual user and should be used in
+ most cases to represent user-generated messages.
+ - `assistant`: Indicates the message is generated by the assistant. Use this
+ value to insert messages from the assistant into the conversation.
+ """
+
+ attachments: Optional[Iterable[MessageAttachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+ type: Required[Literal["auto"]]
+ """Always `auto`."""
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+ chunk_overlap_tokens: Required[int]
+ """The number of tokens that overlap between chunks. The default value is `400`.
+
+ Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+ """
+
+ max_chunk_size_tokens: Required[int]
+ """The maximum number of tokens in each chunk.
+
+ The default value is `800`. The minimum value is `100` and the maximum value is
+ `4096`.
+ """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+ static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+ type: Required[Literal["static"]]
+ """Always `static`."""
+
+
+ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+ ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
+]
+
+
+class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
+ chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
+ """The chunking strategy used to chunk the file(s).
+
+ If not set, will use the `auto` strategy.
+ """
+
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ add to the vector store. There can be a maximum of 10000 files in a vector
+ store.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+ vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
+ """
+ A helper to create a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ store attached to the thread.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
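
As a usage sketch: the `vector_stores` helper above lets a single `threads.create` call build a vector store and attach it with an explicit static chunking strategy. The file ID is a placeholder; the chunk sizes are the documented defaults.

    from openai import OpenAI

    client = OpenAI()

    thread = client.beta.threads.create(
        messages=[{"role": "user", "content": "What does section 3 of the report say?"}],
        tool_resources={
            "file_search": {
                "vector_stores": [
                    {
                        "file_ids": ["file_abc123"],  # placeholder file ID
                        "chunking_strategy": {
                            "type": "static",
                            "static": {
                                "max_chunk_size_tokens": 800,  # documented default
                                "chunk_overlap_tokens": 400,  # documented default
                            },
                        },
                    }
                ]
            }
        },
    )
    print(thread.id)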
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py
new file mode 100644
index 00000000..d3856263
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ThreadDeleted"]
+
+
+class ThreadDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["thread.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py
new file mode 100644
index 00000000..b47ea8f3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ThreadUpdateParams(TypedDict, total=False):
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ tool_resources: Optional[ToolResources]
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
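
As a usage sketch: only `metadata` and `tool_resources` are mutable here, so an update call stays small. All IDs below are placeholders.

    from openai import OpenAI

    client = OpenAI()

    thread = client.beta.threads.update(
        "thread_abc123",  # placeholder thread ID
        metadata={"customer_id": "42", "channel": "web"},
        tool_resources={"file_search": {"vector_store_ids": ["vs_abc123"]}},
    )
    print(thread.metadata)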
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py
new file mode 100644
index 00000000..70853177
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .run import Run as Run
+from .text import Text as Text
+from .message import Message as Message
+from .image_url import ImageURL as ImageURL
+from .annotation import Annotation as Annotation
+from .image_file import ImageFile as ImageFile
+from .run_status import RunStatus as RunStatus
+from .text_delta import TextDelta as TextDelta
+from .message_delta import MessageDelta as MessageDelta
+from .image_url_delta import ImageURLDelta as ImageURLDelta
+from .image_url_param import ImageURLParam as ImageURLParam
+from .message_content import MessageContent as MessageContent
+from .message_deleted import MessageDeleted as MessageDeleted
+from .run_list_params import RunListParams as RunListParams
+from .annotation_delta import AnnotationDelta as AnnotationDelta
+from .image_file_delta import ImageFileDelta as ImageFileDelta
+from .image_file_param import ImageFileParam as ImageFileParam
+from .text_delta_block import TextDeltaBlock as TextDeltaBlock
+from .run_create_params import RunCreateParams as RunCreateParams
+from .run_update_params import RunUpdateParams as RunUpdateParams
+from .text_content_block import TextContentBlock as TextContentBlock
+from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent
+from .message_list_params import MessageListParams as MessageListParams
+from .refusal_delta_block import RefusalDeltaBlock as RefusalDeltaBlock
+from .file_path_annotation import FilePathAnnotation as FilePathAnnotation
+from .image_url_delta_block import ImageURLDeltaBlock as ImageURLDeltaBlock
+from .message_content_delta import MessageContentDelta as MessageContentDelta
+from .message_create_params import MessageCreateParams as MessageCreateParams
+from .message_update_params import MessageUpdateParams as MessageUpdateParams
+from .refusal_content_block import RefusalContentBlock as RefusalContentBlock
+from .image_file_delta_block import ImageFileDeltaBlock as ImageFileDeltaBlock
+from .image_url_content_block import ImageURLContentBlock as ImageURLContentBlock
+from .file_citation_annotation import FileCitationAnnotation as FileCitationAnnotation
+from .image_file_content_block import ImageFileContentBlock as ImageFileContentBlock
+from .text_content_block_param import TextContentBlockParam as TextContentBlockParam
+from .file_path_delta_annotation import FilePathDeltaAnnotation as FilePathDeltaAnnotation
+from .message_content_part_param import MessageContentPartParam as MessageContentPartParam
+from .image_url_content_block_param import ImageURLContentBlockParam as ImageURLContentBlockParam
+from .file_citation_delta_annotation import FileCitationDeltaAnnotation as FileCitationDeltaAnnotation
+from .image_file_content_block_param import ImageFileContentBlockParam as ImageFileContentBlockParam
+from .run_submit_tool_outputs_params import RunSubmitToolOutputsParams as RunSubmitToolOutputsParams
+from .required_action_function_tool_call import RequiredActionFunctionToolCall as RequiredActionFunctionToolCall
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py
new file mode 100644
index 00000000..13c10abf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .file_path_annotation import FilePathAnnotation
+from .file_citation_annotation import FileCitationAnnotation
+
+__all__ = ["Annotation"]
+
+Annotation: TypeAlias = Annotated[Union[FileCitationAnnotation, FilePathAnnotation], PropertyInfo(discriminator="type")]
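
The `PropertyInfo(discriminator="type")` annotation means deserialization already yields one of the two concrete models, so downstream code can branch with `isinstance` instead of re-inspecting raw dicts. A small dispatch sketch:

    from openai.types.beta.threads.annotation import Annotation
    from openai.types.beta.threads.file_path_annotation import FilePathAnnotation
    from openai.types.beta.threads.file_citation_annotation import FileCitationAnnotation

    def describe(annotation: Annotation) -> str:
        # The discriminated union has already been narrowed to a concrete model.
        if isinstance(annotation, FileCitationAnnotation):
            return f"cites file {annotation.file_citation.file_id} at {annotation.text!r}"
        if isinstance(annotation, FilePathAnnotation):
            return f"links generated file {annotation.file_path.file_id}"
        raise TypeError(f"unexpected annotation: {annotation!r}")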
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py
new file mode 100644
index 00000000..c7c6c898
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .file_path_delta_annotation import FilePathDeltaAnnotation
+from .file_citation_delta_annotation import FileCitationDeltaAnnotation
+
+__all__ = ["AnnotationDelta"]
+
+AnnotationDelta: TypeAlias = Annotated[
+ Union[FileCitationDeltaAnnotation, FilePathDeltaAnnotation], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py
new file mode 100644
index 00000000..c3085aed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FileCitationAnnotation", "FileCitation"]
+
+
+class FileCitation(BaseModel):
+ file_id: str
+ """The ID of the specific File the citation is from."""
+
+
+class FileCitationAnnotation(BaseModel):
+ end_index: int
+
+ file_citation: FileCitation
+
+ start_index: int
+
+ text: str
+ """The text in the message content that needs to be replaced."""
+
+ type: Literal["file_citation"]
+ """Always `file_citation`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py
new file mode 100644
index 00000000..b40c0d12
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FileCitationDeltaAnnotation", "FileCitation"]
+
+
+class FileCitation(BaseModel):
+ file_id: Optional[str] = None
+ """The ID of the specific File the citation is from."""
+
+ quote: Optional[str] = None
+ """The specific quote in the file."""
+
+
+class FileCitationDeltaAnnotation(BaseModel):
+ index: int
+ """The index of the annotation in the text content part."""
+
+ type: Literal["file_citation"]
+ """Always `file_citation`."""
+
+ end_index: Optional[int] = None
+
+ file_citation: Optional[FileCitation] = None
+
+ start_index: Optional[int] = None
+
+ text: Optional[str] = None
+ """The text in the message content that needs to be replaced."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py
new file mode 100644
index 00000000..9812737e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FilePathAnnotation", "FilePath"]
+
+
+class FilePath(BaseModel):
+ file_id: str
+ """The ID of the file that was generated."""
+
+
+class FilePathAnnotation(BaseModel):
+ end_index: int
+
+ file_path: FilePath
+
+ start_index: int
+
+ text: str
+ """The text in the message content that needs to be replaced."""
+
+ type: Literal["file_path"]
+ """Always `file_path`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py
new file mode 100644
index 00000000..0cbb445e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FilePathDeltaAnnotation", "FilePath"]
+
+
+class FilePath(BaseModel):
+ file_id: Optional[str] = None
+ """The ID of the file that was generated."""
+
+
+class FilePathDeltaAnnotation(BaseModel):
+ index: int
+ """The index of the annotation in the text content part."""
+
+ type: Literal["file_path"]
+ """Always `file_path`."""
+
+ end_index: Optional[int] = None
+
+ file_path: Optional[FilePath] = None
+
+ start_index: Optional[int] = None
+
+ text: Optional[str] = None
+ """The text in the message content that needs to be replaced."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py
new file mode 100644
index 00000000..6000d975
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageFile"]
+
+
+class ImageFile(BaseModel):
+ file_id: str
+ """
+ The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+ in the message content. Set `purpose="vision"` when uploading the File if you
+ need to later display the file content.
+ """
+
+ detail: Optional[Literal["auto", "low", "high"]] = None
+ """Specifies the detail level of the image if specified by the user.
+
+ `low` uses fewer tokens; you can opt in to high resolution using `high`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py
new file mode 100644
index 00000000..a9099990
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .image_file import ImageFile
+
+__all__ = ["ImageFileContentBlock"]
+
+
+class ImageFileContentBlock(BaseModel):
+ image_file: ImageFile
+
+ type: Literal["image_file"]
+ """Always `image_file`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py
new file mode 100644
index 00000000..48d94bee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .image_file_param import ImageFileParam
+
+__all__ = ["ImageFileContentBlockParam"]
+
+
+class ImageFileContentBlockParam(TypedDict, total=False):
+ image_file: Required[ImageFileParam]
+
+ type: Required[Literal["image_file"]]
+ """Always `image_file`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py
new file mode 100644
index 00000000..4581184c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageFileDelta"]
+
+
+class ImageFileDelta(BaseModel):
+ detail: Optional[Literal["auto", "low", "high"]] = None
+ """Specifies the detail level of the image if specified by the user.
+
+ `low` uses fewer tokens; you can opt in to high resolution using `high`.
+ """
+
+ file_id: Optional[str] = None
+ """
+ The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+ in the message content. Set `purpose="vision"` when uploading the File if you
+ need to later display the file content.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py
new file mode 100644
index 00000000..0a5a2e8a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .image_file_delta import ImageFileDelta
+
+__all__ = ["ImageFileDeltaBlock"]
+
+
+class ImageFileDeltaBlock(BaseModel):
+ index: int
+ """The index of the content part in the message."""
+
+ type: Literal["image_file"]
+ """Always `image_file`."""
+
+ image_file: Optional[ImageFileDelta] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py
new file mode 100644
index 00000000..e4a85358
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ImageFileParam"]
+
+
+class ImageFileParam(TypedDict, total=False):
+ file_id: Required[str]
+ """
+ The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+ in the message content. Set `purpose="vision"` when uploading the File if you
+ need to later display the file content.
+ """
+
+ detail: Literal["auto", "low", "high"]
+ """Specifies the detail level of the image if specified by the user.
+
+ `low` uses fewer tokens; you can opt in to high resolution using `high`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py
new file mode 100644
index 00000000..d1fac147
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageURL"]
+
+
+class ImageURL(BaseModel):
+ url: str
+ """
+ The external URL of the image; must be one of the supported image types: jpeg,
+ jpg, png, gif, or webp.
+ """
+
+ detail: Optional[Literal["auto", "low", "high"]] = None
+ """Specifies the detail level of the image.
+
+ `low` uses fewer tokens; you can opt in to high resolution using `high`. The
+ default value is `auto`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py
new file mode 100644
index 00000000..40a16c1d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .image_url import ImageURL
+from ...._models import BaseModel
+
+__all__ = ["ImageURLContentBlock"]
+
+
+class ImageURLContentBlock(BaseModel):
+ image_url: ImageURL
+
+ type: Literal["image_url"]
+ """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py
new file mode 100644
index 00000000..585b926c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .image_url_param import ImageURLParam
+
+__all__ = ["ImageURLContentBlockParam"]
+
+
+class ImageURLContentBlockParam(TypedDict, total=False):
+ image_url: Required[ImageURLParam]
+
+ type: Required[Literal["image_url"]]
+ """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py
new file mode 100644
index 00000000..e4026719
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageURLDelta"]
+
+
+class ImageURLDelta(BaseModel):
+ detail: Optional[Literal["auto", "low", "high"]] = None
+ """Specifies the detail level of the image.
+
+ `low` uses fewer tokens; you can opt in to high resolution using `high`.
+ """
+
+ url: Optional[str] = None
+ """
+ The URL of the image; must be one of the supported image types: jpeg, jpg, png,
+ gif, or webp.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py
new file mode 100644
index 00000000..5252da12
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .image_url_delta import ImageURLDelta
+
+__all__ = ["ImageURLDeltaBlock"]
+
+
+class ImageURLDeltaBlock(BaseModel):
+ index: int
+ """The index of the content part in the message."""
+
+ type: Literal["image_url"]
+ """Always `image_url`."""
+
+ image_url: Optional[ImageURLDelta] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py
new file mode 100644
index 00000000..6b7e427e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ImageURLParam"]
+
+
+class ImageURLParam(TypedDict, total=False):
+ url: Required[str]
+ """
+ The external URL of the image; must be one of the supported image types: jpeg,
+ jpg, png, gif, or webp.
+ """
+
+ detail: Literal["auto", "low", "high"]
+ """Specifies the detail level of the image.
+
+ `low` uses fewer tokens; you can opt in to high resolution using `high`. The
+ default value is `auto`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py
new file mode 100644
index 00000000..4a05a128
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py
@@ -0,0 +1,103 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ...._models import BaseModel
+from .message_content import MessageContent
+from ...shared.metadata import Metadata
+from ..code_interpreter_tool import CodeInterpreterTool
+
+__all__ = [
+ "Message",
+ "Attachment",
+ "AttachmentTool",
+ "AttachmentToolAssistantToolsFileSearchTypeOnly",
+ "IncompleteDetails",
+]
+
+
+class AttachmentToolAssistantToolsFileSearchTypeOnly(BaseModel):
+ type: Literal["file_search"]
+ """The type of tool being defined: `file_search`"""
+
+
+AttachmentTool: TypeAlias = Union[CodeInterpreterTool, AttachmentToolAssistantToolsFileSearchTypeOnly]
+
+
+class Attachment(BaseModel):
+ file_id: Optional[str] = None
+ """The ID of the file to attach to the message."""
+
+ tools: Optional[List[AttachmentTool]] = None
+ """The tools to add this file to."""
+
+
+class IncompleteDetails(BaseModel):
+ reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"]
+ """The reason the message is incomplete."""
+
+
+class Message(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ assistant_id: Optional[str] = None
+ """
+ If applicable, the ID of the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants) that
+ authored this message.
+ """
+
+ attachments: Optional[List[Attachment]] = None
+ """A list of files attached to the message, and the tools they were added to."""
+
+ completed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the message was completed."""
+
+ content: List[MessageContent]
+ """The content of the message in array of text and/or images."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the message was created."""
+
+ incomplete_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the message was marked as incomplete."""
+
+ incomplete_details: Optional[IncompleteDetails] = None
+ """On an incomplete message, details about why the message is incomplete."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ object: Literal["thread.message"]
+ """The object type, which is always `thread.message`."""
+
+ role: Literal["user", "assistant"]
+ """The entity that produced the message. One of `user` or `assistant`."""
+
+ run_id: Optional[str] = None
+ """
+ The ID of the [run](https://platform.openai.com/docs/api-reference/runs)
+ associated with the creation of this message. Value is `null` when messages are
+ created manually using the create message or create thread endpoints.
+ """
+
+ status: Literal["in_progress", "incomplete", "completed"]
+ """
+ The status of the message, which can be either `in_progress`, `incomplete`, or
+ `completed`.
+ """
+
+ thread_id: str
+ """
+ The [thread](https://platform.openai.com/docs/api-reference/threads) ID that
+ this message belongs to.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py
new file mode 100644
index 00000000..9523c1e1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .text_content_block import TextContentBlock
+from .refusal_content_block import RefusalContentBlock
+from .image_url_content_block import ImageURLContentBlock
+from .image_file_content_block import ImageFileContentBlock
+
+__all__ = ["MessageContent"]
+
+
+MessageContent: TypeAlias = Annotated[
+ Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py
new file mode 100644
index 00000000..b6e7dfa4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .text_delta_block import TextDeltaBlock
+from .refusal_delta_block import RefusalDeltaBlock
+from .image_url_delta_block import ImageURLDeltaBlock
+from .image_file_delta_block import ImageFileDeltaBlock
+
+__all__ = ["MessageContentDelta"]
+
+MessageContentDelta: TypeAlias = Annotated[
+ Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py
new file mode 100644
index 00000000..dc09a01c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .text_content_block_param import TextContentBlockParam
+from .image_url_content_block_param import ImageURLContentBlockParam
+from .image_file_content_block_param import ImageFileContentBlockParam
+
+__all__ = ["MessageContentPartParam"]
+
+MessageContentPartParam: TypeAlias = Union[ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py
new file mode 100644
index 00000000..b5238682
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ...shared_params.metadata import Metadata
+from .message_content_part_param import MessageContentPartParam
+from ..code_interpreter_tool_param import CodeInterpreterToolParam
+
+__all__ = ["MessageCreateParams", "Attachment", "AttachmentTool", "AttachmentToolFileSearch"]
+
+
+class MessageCreateParams(TypedDict, total=False):
+ content: Required[Union[str, Iterable[MessageContentPartParam]]]
+ """The text contents of the message."""
+
+ role: Required[Literal["user", "assistant"]]
+ """The role of the entity that is creating the message. Allowed values include:
+
+ - `user`: Indicates the message is sent by an actual user and should be used in
+ most cases to represent user-generated messages.
+ - `assistant`: Indicates the message is generated by the assistant. Use this
+ value to insert messages from the assistant into the conversation.
+ """
+
+ attachments: Optional[Iterable[Attachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
+class AttachmentToolFileSearch(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """The type of tool being defined: `file_search`"""
+
+
+AttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, AttachmentToolFileSearch]
+
+
+class Attachment(TypedDict, total=False):
+ file_id: str
+ """The ID of the file to attach to the message."""
+
+ tools: Iterable[AttachmentTool]
+ """The tools to add this file to."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py
new file mode 100644
index 00000000..48210777
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["MessageDeleted"]
+
+
+class MessageDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["thread.message.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py
new file mode 100644
index 00000000..ecd0dfe3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .message_content_delta import MessageContentDelta
+
+__all__ = ["MessageDelta"]
+
+
+class MessageDelta(BaseModel):
+ content: Optional[List[MessageContentDelta]] = None
+ """The content of the message in array of text and/or images."""
+
+ role: Optional[Literal["user", "assistant"]] = None
+ """The entity that produced the message. One of `user` or `assistant`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py
new file mode 100644
index 00000000..3811cef6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .message_delta import MessageDelta
+
+__all__ = ["MessageDeltaEvent"]
+
+
+class MessageDeltaEvent(BaseModel):
+ id: str
+ """The identifier of the message, which can be referenced in API endpoints."""
+
+ delta: MessageDelta
+ """The delta containing the fields that have changed on the Message."""
+
+ object: Literal["thread.message.delta"]
+ """The object type, which is always `thread.message.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py
new file mode 100644
index 00000000..a7c22a66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["MessageListParams"]
+
+
+class MessageListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
+
+ run_id: str
+ """Filter messages by the run ID that generated them."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py
new file mode 100644
index 00000000..bb078281
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...shared_params.metadata import Metadata
+
+__all__ = ["MessageUpdateParams"]
+
+
+class MessageUpdateParams(TypedDict, total=False):
+ thread_id: Required[str]
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py
new file mode 100644
index 00000000..d54f9485
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalContentBlock"]
+
+
+class RefusalContentBlock(BaseModel):
+ refusal: str
+
+ type: Literal["refusal"]
+ """Always `refusal`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py
new file mode 100644
index 00000000..dbd8e626
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalDeltaBlock"]
+
+
+class RefusalDeltaBlock(BaseModel):
+ index: int
+ """The index of the refusal part in the message."""
+
+ type: Literal["refusal"]
+ """Always `refusal`."""
+
+ refusal: Optional[str] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py
new file mode 100644
index 00000000..a24dfd06
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RequiredActionFunctionToolCall", "Function"]
+
+
+class Function(BaseModel):
+ arguments: str
+ """The arguments that the model expects you to pass to the function."""
+
+ name: str
+ """The name of the function."""
+
+
+class RequiredActionFunctionToolCall(BaseModel):
+ id: str
+ """The ID of the tool call.
+
+    This ID must be referenced when you submit the tool outputs using the
+ [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ endpoint.
+ """
+
+ function: Function
+ """The function definition."""
+
+ type: Literal["function"]
+ """The type of tool call the output is required for.
+
+ For now, this is always `function`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py
new file mode 100644
index 00000000..da9418d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py
@@ -0,0 +1,245 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .run_status import RunStatus
+from ..assistant_tool import AssistantTool
+from ...shared.metadata import Metadata
+from ..assistant_tool_choice_option import AssistantToolChoiceOption
+from ..assistant_response_format_option import AssistantResponseFormatOption
+from .required_action_function_tool_call import RequiredActionFunctionToolCall
+
+__all__ = [
+ "Run",
+ "IncompleteDetails",
+ "LastError",
+ "RequiredAction",
+ "RequiredActionSubmitToolOutputs",
+ "TruncationStrategy",
+ "Usage",
+]
+
+
+class IncompleteDetails(BaseModel):
+ reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None
+ """The reason why the run is incomplete.
+
+ This will point to which specific token limit was reached over the course of the
+ run.
+ """
+
+
+class LastError(BaseModel):
+ code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
+ """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""
+
+ message: str
+ """A human-readable description of the error."""
+
+
+class RequiredActionSubmitToolOutputs(BaseModel):
+ tool_calls: List[RequiredActionFunctionToolCall]
+ """A list of the relevant tool calls."""
+
+
+class RequiredAction(BaseModel):
+ submit_tool_outputs: RequiredActionSubmitToolOutputs
+ """Details on the tool outputs needed for this run to continue."""
+
+ type: Literal["submit_tool_outputs"]
+ """For now, this is always `submit_tool_outputs`."""
+
+
+class TruncationStrategy(BaseModel):
+ type: Literal["auto", "last_messages"]
+ """The truncation strategy to use for the thread.
+
+ The default is `auto`. If set to `last_messages`, the thread will be truncated
+ to the n most recent messages in the thread. When set to `auto`, messages in the
+ middle of the thread will be dropped to fit the context length of the model,
+ `max_prompt_tokens`.
+ """
+
+ last_messages: Optional[int] = None
+ """
+    The number of most recent messages from the thread to include when
+    constructing the context for the run.
+ """
+
+
+class Usage(BaseModel):
+ completion_tokens: int
+ """Number of completion tokens used over the course of the run."""
+
+ prompt_tokens: int
+ """Number of prompt tokens used over the course of the run."""
+
+ total_tokens: int
+ """Total number of tokens used (prompt + completion)."""
+
+
+class Run(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ assistant_id: str
+ """
+ The ID of the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ execution of this run.
+ """
+
+ cancelled_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run was cancelled."""
+
+ completed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run was completed."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the run was created."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run will expire."""
+
+ failed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run failed."""
+
+ incomplete_details: Optional[IncompleteDetails] = None
+ """Details on why the run is incomplete.
+
+ Will be `null` if the run is not incomplete.
+ """
+
+ instructions: str
+ """
+ The instructions that the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ this run.
+ """
+
+ last_error: Optional[LastError] = None
+ """The last error associated with this run. Will be `null` if there are no errors."""
+
+ max_completion_tokens: Optional[int] = None
+ """
+ The maximum number of completion tokens specified to have been used over the
+ course of the run.
+ """
+
+ max_prompt_tokens: Optional[int] = None
+ """
+ The maximum number of prompt tokens specified to have been used over the course
+ of the run.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: str
+ """
+ The model that the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ this run.
+ """
+
+ object: Literal["thread.run"]
+ """The object type, which is always `thread.run`."""
+
+ parallel_tool_calls: bool
+ """
+ Whether to enable
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ during tool use.
+ """
+
+ required_action: Optional[RequiredAction] = None
+ """Details on the action required to continue the run.
+
+ Will be `null` if no action is required.
+ """
+
+ response_format: Optional[AssistantResponseFormatOption] = None
+ """Specifies the format that the model must output.
+
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
+ in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ started_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run was started."""
+
+ status: RunStatus
+ """
+ The status of the run, which can be either `queued`, `in_progress`,
+ `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
+ `incomplete`, or `expired`.
+ """
+
+ thread_id: str
+ """
+ The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
+ that was executed on as a part of this run.
+ """
+
+ tool_choice: Optional[AssistantToolChoiceOption] = None
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tools and instead generates a message. `auto` is the default value
+ and means the model can pick between generating a message or calling one or more
+ tools. `required` means the model must call one or more tools before responding
+ to the user. Specifying a particular tool like `{"type": "file_search"}` or
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+ """
+
+ tools: List[AssistantTool]
+ """
+ The list of tools that the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ this run.
+ """
+
+ truncation_strategy: Optional[TruncationStrategy] = None
+ """Controls for how a thread will be truncated prior to the run.
+
+    Use this to control the initial context window of the run.
+ """
+
+ usage: Optional[Usage] = None
+ """Usage statistics related to the run.
+
+    This value will be `null` if the run is not in a terminal state (e.g. while
+    it is still `in_progress` or `queued`).
+ """
+
+ temperature: Optional[float] = None
+ """The sampling temperature used for this run. If not set, defaults to 1."""
+
+ top_p: Optional[float] = None
+ """The nucleus sampling value used for this run. If not set, defaults to 1."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py
new file mode 100644
index 00000000..fc702278
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py
@@ -0,0 +1,261 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ...shared.chat_model import ChatModel
+from ..assistant_tool_param import AssistantToolParam
+from .runs.run_step_include import RunStepInclude
+from ...shared_params.metadata import Metadata
+from ...shared.reasoning_effort import ReasoningEffort
+from .message_content_part_param import MessageContentPartParam
+from ..code_interpreter_tool_param import CodeInterpreterToolParam
+from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
+from ..assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = [
+ "RunCreateParamsBase",
+ "AdditionalMessage",
+ "AdditionalMessageAttachment",
+ "AdditionalMessageAttachmentTool",
+ "AdditionalMessageAttachmentToolFileSearch",
+ "TruncationStrategy",
+ "RunCreateParamsNonStreaming",
+ "RunCreateParamsStreaming",
+]
+
+
+class RunCreateParamsBase(TypedDict, total=False):
+ assistant_id: Required[str]
+ """
+ The ID of the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ execute this run.
+ """
+
+ include: List[RunStepInclude]
+ """A list of additional fields to include in the response.
+
+ Currently the only supported value is
+ `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ for more information.
+ """
+
+ additional_instructions: Optional[str]
+ """Appends additional instructions at the end of the instructions for the run.
+
+ This is useful for modifying the behavior on a per-run basis without overriding
+ other instructions.
+ """
+
+ additional_messages: Optional[Iterable[AdditionalMessage]]
+ """Adds additional messages to the thread before creating the run."""
+
+ instructions: Optional[str]
+ """
+ Overrides the
+ [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+ of the assistant. This is useful for modifying the behavior on a per-run basis.
+ """
+
+ max_completion_tokens: Optional[int]
+ """
+ The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run. If the run exceeds the number of
+ completion tokens specified, the run will end with status `incomplete`. See
+ `incomplete_details` for more info.
+ """
+
+ max_prompt_tokens: Optional[int]
+ """The maximum number of prompt tokens that may be used over the course of the run.
+
+ The run will make a best effort to use only the number of prompt tokens
+ specified, across multiple turns of the run. If the run exceeds the number of
+ prompt tokens specified, the run will end with status `incomplete`. See
+ `incomplete_details` for more info.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: Union[str, ChatModel, None]
+ """
+ The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ be used to execute this run. If a value is provided here, it will override the
+ model associated with the assistant. If not, the model associated with the
+ assistant will be used.
+ """
+
+ parallel_tool_calls: bool
+ """
+ Whether to enable
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ during tool use.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """**o-series models only**
+
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ result in faster responses and fewer tokens used on reasoning in a response.
+ """
+
+ response_format: Optional[AssistantResponseFormatOptionParam]
+ """Specifies the format that the model must output.
+
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
+ in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+ """
+
+ tool_choice: Optional[AssistantToolChoiceOptionParam]
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tools and instead generates a message. `auto` is the default value
+ and means the model can pick between generating a message or calling one or more
+ tools. `required` means the model must call one or more tools before responding
+ to the user. Specifying a particular tool like `{"type": "file_search"}` or
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+ """
+
+ tools: Optional[Iterable[AssistantToolParam]]
+ """Override the tools the assistant can use for this run.
+
+ This is useful for modifying the behavior on a per-run basis.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
+ """
+
+ truncation_strategy: Optional[TruncationStrategy]
+ """Controls for how a thread will be truncated prior to the run.
+
+    Use this to control the initial context window of the run.
+ """
+
+
+class AdditionalMessageAttachmentToolFileSearch(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """The type of tool being defined: `file_search`"""
+
+
+AdditionalMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, AdditionalMessageAttachmentToolFileSearch]
+
+
+class AdditionalMessageAttachment(TypedDict, total=False):
+ file_id: str
+ """The ID of the file to attach to the message."""
+
+ tools: Iterable[AdditionalMessageAttachmentTool]
+ """The tools to add this file to."""
+
+
+class AdditionalMessage(TypedDict, total=False):
+ content: Required[Union[str, Iterable[MessageContentPartParam]]]
+ """The text contents of the message."""
+
+ role: Required[Literal["user", "assistant"]]
+ """The role of the entity that is creating the message. Allowed values include:
+
+ - `user`: Indicates the message is sent by an actual user and should be used in
+ most cases to represent user-generated messages.
+ - `assistant`: Indicates the message is generated by the assistant. Use this
+ value to insert messages from the assistant into the conversation.
+ """
+
+ attachments: Optional[Iterable[AdditionalMessageAttachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
+class TruncationStrategy(TypedDict, total=False):
+ type: Required[Literal["auto", "last_messages"]]
+ """The truncation strategy to use for the thread.
+
+ The default is `auto`. If set to `last_messages`, the thread will be truncated
+ to the n most recent messages in the thread. When set to `auto`, messages in the
+ middle of the thread will be dropped to fit the context length of the model,
+ `max_prompt_tokens`.
+ """
+
+ last_messages: Optional[int]
+ """
+    The number of most recent messages from the thread to include when
+    constructing the context for the run.
+ """
+
+
+class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If `true`, returns a stream of events that happen during the Run as server-sent
+ events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ message.
+ """
+
+
+class RunCreateParamsStreaming(RunCreateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If `true`, returns a stream of events that happen during the Run as server-sent
+ events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ message.
+ """
+
+
+RunCreateParams = Union[RunCreateParamsNonStreaming, RunCreateParamsStreaming]
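
The base/non-streaming/streaming split mirrors the SDK's overload pattern: `stream` is the only field that differs, and its literal type selects which response shape the call returns. A sketch of both payloads against these TypedDicts; the assistant ID is a placeholder.

from openai.types.beta.threads.run_create_params import (
    RunCreateParamsNonStreaming,
    RunCreateParamsStreaming,
)

blocking: RunCreateParamsNonStreaming = {
    "assistant_id": "asst_abc123",  # placeholder
    "instructions": "Answer in one paragraph.",
    # Keep only the 10 most recent messages in the run's context window.
    "truncation_strategy": {"type": "last_messages", "last_messages": 10},
}

streaming: RunCreateParamsStreaming = {
    "assistant_id": "asst_abc123",  # placeholder
    "stream": True,  # Literal[True] selects the server-sent-events response
}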
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py
new file mode 100644
index 00000000..fbea54f6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["RunListParams"]
+
+
+class RunListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py
new file mode 100644
index 00000000..47c7cbd0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["RunStatus"]
+
+RunStatus: TypeAlias = Literal[
+ "queued",
+ "in_progress",
+ "requires_action",
+ "cancelling",
+ "cancelled",
+ "failed",
+ "completed",
+ "incomplete",
+ "expired",
+]
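
Because `RunStatus` is a closed `Literal`, a terminal/non-terminal partition can be written as a plain frozenset and checked exhaustively. A small sketch; the partition follows the `usage` docstring in run.py above, with `requires_action` treated as non-terminal since the run resumes once tool outputs are submitted.

from openai.types.beta.threads.run_status import RunStatus

TERMINAL: frozenset[RunStatus] = frozenset(
    {"cancelled", "failed", "completed", "incomplete", "expired"}
)

def is_terminal(status: RunStatus) -> bool:
    # queued, in_progress, requires_action and cancelling can all still
    # transition; the five states above cannot.
    return status in TERMINAL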
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py
new file mode 100644
index 00000000..14772860
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = [
+ "RunSubmitToolOutputsParamsBase",
+ "ToolOutput",
+ "RunSubmitToolOutputsParamsNonStreaming",
+ "RunSubmitToolOutputsParamsStreaming",
+]
+
+
+class RunSubmitToolOutputsParamsBase(TypedDict, total=False):
+ thread_id: Required[str]
+
+ tool_outputs: Required[Iterable[ToolOutput]]
+ """A list of tools for which the outputs are being submitted."""
+
+
+class ToolOutput(TypedDict, total=False):
+ output: str
+ """The output of the tool call to be submitted to continue the run."""
+
+ tool_call_id: str
+ """
+ The ID of the tool call in the `required_action` object within the run object
+ the output is being submitted for.
+ """
+
+
+class RunSubmitToolOutputsParamsNonStreaming(RunSubmitToolOutputsParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If `true`, returns a stream of events that happen during the Run as server-sent
+ events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ message.
+ """
+
+
+class RunSubmitToolOutputsParamsStreaming(RunSubmitToolOutputsParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If `true`, returns a stream of events that happen during the Run as server-sent
+ events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ message.
+ """
+
+
+RunSubmitToolOutputsParams = Union[RunSubmitToolOutputsParamsNonStreaming, RunSubmitToolOutputsParamsStreaming]
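
Tying the two halves together: `required_action` on the Run (run.py above) names the pending tool calls, and this params type carries the answers back. A sketch that echoes each call's arguments as its output, standing in for real tool execution.

from openai.types.beta.threads.run import Run
from openai.types.beta.threads.run_submit_tool_outputs_params import ToolOutput

def build_tool_outputs(run: Run) -> list[ToolOutput]:
    # Each output must carry the tool_call_id it answers; the output body
    # itself is an arbitrary string (here, a trivial echo of the arguments).
    assert run.required_action is not None, "run is not waiting on tool outputs"
    return [
        {"tool_call_id": call.id, "output": call.function.arguments}
        for call in run.required_action.submit_tool_outputs.tool_calls
    ]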
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py
new file mode 100644
index 00000000..fbcbd3fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...shared_params.metadata import Metadata
+
+__all__ = ["RunUpdateParams"]
+
+
+class RunUpdateParams(TypedDict, total=False):
+ thread_id: Required[str]
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py
new file mode 100644
index 00000000..467d5d79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .run_step import RunStep as RunStep
+from .tool_call import ToolCall as ToolCall
+from .run_step_delta import RunStepDelta as RunStepDelta
+from .tool_call_delta import ToolCallDelta as ToolCallDelta
+from .run_step_include import RunStepInclude as RunStepInclude
+from .step_list_params import StepListParams as StepListParams
+from .function_tool_call import FunctionToolCall as FunctionToolCall
+from .run_step_delta_event import RunStepDeltaEvent as RunStepDeltaEvent
+from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams
+from .code_interpreter_logs import CodeInterpreterLogs as CodeInterpreterLogs
+from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall
+from .tool_call_delta_object import ToolCallDeltaObject as ToolCallDeltaObject
+from .tool_calls_step_details import ToolCallsStepDetails as ToolCallsStepDetails
+from .function_tool_call_delta import FunctionToolCallDelta as FunctionToolCallDelta
+from .code_interpreter_tool_call import CodeInterpreterToolCall as CodeInterpreterToolCall
+from .file_search_tool_call_delta import FileSearchToolCallDelta as FileSearchToolCallDelta
+from .run_step_delta_message_delta import RunStepDeltaMessageDelta as RunStepDeltaMessageDelta
+from .code_interpreter_output_image import CodeInterpreterOutputImage as CodeInterpreterOutputImage
+from .message_creation_step_details import MessageCreationStepDetails as MessageCreationStepDetails
+from .code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py
new file mode 100644
index 00000000..0bf8c1da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["CodeInterpreterLogs"]
+
+
+class CodeInterpreterLogs(BaseModel):
+ index: int
+ """The index of the output in the outputs array."""
+
+ type: Literal["logs"]
+ """Always `logs`."""
+
+ logs: Optional[str] = None
+ """The text output from the Code Interpreter tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py
new file mode 100644
index 00000000..2257f37e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["CodeInterpreterOutputImage", "Image"]
+
+
+class Image(BaseModel):
+ file_id: Optional[str] = None
+ """
+ The [file](https://platform.openai.com/docs/api-reference/files) ID of the
+ image.
+ """
+
+
+class CodeInterpreterOutputImage(BaseModel):
+ index: int
+ """The index of the output in the outputs array."""
+
+ type: Literal["image"]
+ """Always `image`."""
+
+ image: Optional[Image] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py
new file mode 100644
index 00000000..e7df4e19
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+
+__all__ = [
+ "CodeInterpreterToolCall",
+ "CodeInterpreter",
+ "CodeInterpreterOutput",
+ "CodeInterpreterOutputLogs",
+ "CodeInterpreterOutputImage",
+ "CodeInterpreterOutputImageImage",
+]
+
+
+class CodeInterpreterOutputLogs(BaseModel):
+ logs: str
+ """The text output from the Code Interpreter tool call."""
+
+ type: Literal["logs"]
+ """Always `logs`."""
+
+
+class CodeInterpreterOutputImageImage(BaseModel):
+ file_id: str
+ """
+ The [file](https://platform.openai.com/docs/api-reference/files) ID of the
+ image.
+ """
+
+
+class CodeInterpreterOutputImage(BaseModel):
+ image: CodeInterpreterOutputImageImage
+
+ type: Literal["image"]
+ """Always `image`."""
+
+
+CodeInterpreterOutput: TypeAlias = Annotated[
+ Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage], PropertyInfo(discriminator="type")
+]
+
+
+class CodeInterpreter(BaseModel):
+ input: str
+ """The input to the Code Interpreter tool call."""
+
+ outputs: List[CodeInterpreterOutput]
+ """The outputs from the Code Interpreter tool call.
+
+ Code Interpreter can output one or more items, including text (`logs`) or images
+ (`image`). Each of these are represented by a different object type.
+ """
+
+
+class CodeInterpreterToolCall(BaseModel):
+ id: str
+ """The ID of the tool call."""
+
+ code_interpreter: CodeInterpreter
+ """The Code Interpreter tool call definition."""
+
+ type: Literal["code_interpreter"]
+ """The type of tool call.
+
+ This is always going to be `code_interpreter` for this type of tool call.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py
new file mode 100644
index 00000000..9d7a1563
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+from .code_interpreter_logs import CodeInterpreterLogs
+from .code_interpreter_output_image import CodeInterpreterOutputImage
+
+__all__ = ["CodeInterpreterToolCallDelta", "CodeInterpreter", "CodeInterpreterOutput"]
+
+CodeInterpreterOutput: TypeAlias = Annotated[
+ Union[CodeInterpreterLogs, CodeInterpreterOutputImage], PropertyInfo(discriminator="type")
+]
+
+
+class CodeInterpreter(BaseModel):
+ input: Optional[str] = None
+ """The input to the Code Interpreter tool call."""
+
+ outputs: Optional[List[CodeInterpreterOutput]] = None
+ """The outputs from the Code Interpreter tool call.
+
+ Code Interpreter can output one or more items, including text (`logs`) or images
+ (`image`). Each of these are represented by a different object type.
+ """
+
+
+class CodeInterpreterToolCallDelta(BaseModel):
+ index: int
+ """The index of the tool call in the tool calls array."""
+
+ type: Literal["code_interpreter"]
+ """The type of tool call.
+
+ This is always going to be `code_interpreter` for this type of tool call.
+ """
+
+ id: Optional[str] = None
+ """The ID of the tool call."""
+
+ code_interpreter: Optional[CodeInterpreter] = None
+ """The Code Interpreter tool call definition."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py
new file mode 100644
index 00000000..a2068daa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = [
+ "FileSearchToolCall",
+ "FileSearch",
+ "FileSearchRankingOptions",
+ "FileSearchResult",
+ "FileSearchResultContent",
+]
+
+
+class FileSearchRankingOptions(BaseModel):
+ ranker: Literal["auto", "default_2024_08_21"]
+ """The ranker to use for the file search.
+
+    If not specified, the `auto` ranker will be used.
+ """
+
+ score_threshold: float
+ """The score threshold for the file search.
+
+    The value must be a floating point number between 0 and 1.
+ """
+
+
+class FileSearchResultContent(BaseModel):
+ text: Optional[str] = None
+ """The text content of the file."""
+
+ type: Optional[Literal["text"]] = None
+ """The type of the content."""
+
+
+class FileSearchResult(BaseModel):
+ file_id: str
+ """The ID of the file that result was found in."""
+
+ file_name: str
+ """The name of the file that result was found in."""
+
+ score: float
+ """The score of the result.
+
+    The value must be a floating point number between 0 and 1.
+ """
+
+ content: Optional[List[FileSearchResultContent]] = None
+ """The content of the result that was found.
+
+ The content is only included if requested via the include query parameter.
+ """
+
+
+class FileSearch(BaseModel):
+ ranking_options: Optional[FileSearchRankingOptions] = None
+ """The ranking options for the file search."""
+
+ results: Optional[List[FileSearchResult]] = None
+ """The results of the file search."""
+
+
+class FileSearchToolCall(BaseModel):
+ id: str
+ """The ID of the tool call object."""
+
+ file_search: FileSearch
+ """For now, this is always going to be an empty object."""
+
+ type: Literal["file_search"]
+ """The type of tool call.
+
+ This is always going to be `file_search` for this type of tool call.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py
new file mode 100644
index 00000000..df5ac217
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["FileSearchToolCallDelta"]
+
+
+class FileSearchToolCallDelta(BaseModel):
+ file_search: object
+ """For now, this is always going to be an empty object."""
+
+ index: int
+ """The index of the tool call in the tool calls array."""
+
+ type: Literal["file_search"]
+ """The type of tool call.
+
+ This is always going to be `file_search` for this type of tool call.
+ """
+
+ id: Optional[str] = None
+ """The ID of the tool call object."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py
new file mode 100644
index 00000000..b1d354f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py
@@ -0,0 +1,38 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["FunctionToolCall", "Function"]
+
+
+class Function(BaseModel):
+ arguments: str
+ """The arguments passed to the function."""
+
+ name: str
+ """The name of the function."""
+
+ output: Optional[str] = None
+ """The output of the function.
+
+ This will be `null` if the outputs have not been
+ [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ yet.
+ """
+
+
+class FunctionToolCall(BaseModel):
+ id: str
+ """The ID of the tool call object."""
+
+ function: Function
+ """The definition of the function that was called."""
+
+ type: Literal["function"]
+ """The type of tool call.
+
+ This is always going to be `function` for this type of tool call.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py
new file mode 100644
index 00000000..faaf026f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["FunctionToolCallDelta", "Function"]
+
+
+class Function(BaseModel):
+ arguments: Optional[str] = None
+ """The arguments passed to the function."""
+
+ name: Optional[str] = None
+ """The name of the function."""
+
+ output: Optional[str] = None
+ """The output of the function.
+
+ This will be `null` if the outputs have not been
+ [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ yet.
+ """
+
+
+class FunctionToolCallDelta(BaseModel):
+ index: int
+ """The index of the tool call in the tool calls array."""
+
+ type: Literal["function"]
+ """The type of tool call.
+
+ This is always going to be `function` for this type of tool call.
+ """
+
+ id: Optional[str] = None
+ """The ID of the tool call object."""
+
+ function: Optional[Function] = None
+ """The definition of the function that was called."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py
new file mode 100644
index 00000000..73439079
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["MessageCreationStepDetails", "MessageCreation"]
+
+
+class MessageCreation(BaseModel):
+ message_id: str
+ """The ID of the message that was created by this run step."""
+
+
+class MessageCreationStepDetails(BaseModel):
+ message_creation: MessageCreation
+
+ type: Literal["message_creation"]
+ """Always `message_creation`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py
new file mode 100644
index 00000000..b5f380c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py
@@ -0,0 +1,115 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+from ....shared.metadata import Metadata
+from .tool_calls_step_details import ToolCallsStepDetails
+from .message_creation_step_details import MessageCreationStepDetails
+
+__all__ = ["RunStep", "LastError", "StepDetails", "Usage"]
+
+
+class LastError(BaseModel):
+ code: Literal["server_error", "rate_limit_exceeded"]
+ """One of `server_error` or `rate_limit_exceeded`."""
+
+ message: str
+ """A human-readable description of the error."""
+
+
+StepDetails: TypeAlias = Annotated[
+ Union[MessageCreationStepDetails, ToolCallsStepDetails], PropertyInfo(discriminator="type")
+]
+
+
+class Usage(BaseModel):
+ completion_tokens: int
+ """Number of completion tokens used over the course of the run step."""
+
+ prompt_tokens: int
+ """Number of prompt tokens used over the course of the run step."""
+
+ total_tokens: int
+ """Total number of tokens used (prompt + completion)."""
+
+
+class RunStep(BaseModel):
+ id: str
+ """The identifier of the run step, which can be referenced in API endpoints."""
+
+ assistant_id: str
+ """
+ The ID of the
+ [assistant](https://platform.openai.com/docs/api-reference/assistants)
+ associated with the run step.
+ """
+
+ cancelled_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run step was cancelled."""
+
+ completed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run step completed."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the run step was created."""
+
+ expired_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run step expired.
+
+ A step is considered expired if the parent run is expired.
+ """
+
+ failed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the run step failed."""
+
+ last_error: Optional[LastError] = None
+ """The last error associated with this run step.
+
+ Will be `null` if there are no errors.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ object: Literal["thread.run.step"]
+ """The object type, which is always `thread.run.step`."""
+
+ run_id: str
+ """
+ The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that
+ this run step is a part of.
+ """
+
+ status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
+ """
+ The status of the run step, which can be either `in_progress`, `cancelled`,
+ `failed`, `completed`, or `expired`.
+ """
+
+ step_details: StepDetails
+ """The details of the run step."""
+
+ thread_id: str
+ """
+ The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
+ that was run.
+ """
+
+ type: Literal["message_creation", "tool_calls"]
+ """The type of run step, which can be either `message_creation` or `tool_calls`."""
+
+ usage: Optional[Usage] = None
+ """Usage statistics related to the run step.
+
+ This value will be `null` while the run step's status is `in_progress`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py
new file mode 100644
index 00000000..1139088f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+from .tool_call_delta_object import ToolCallDeltaObject
+from .run_step_delta_message_delta import RunStepDeltaMessageDelta
+
+__all__ = ["RunStepDelta", "StepDetails"]
+
+StepDetails: TypeAlias = Annotated[
+ Union[RunStepDeltaMessageDelta, ToolCallDeltaObject], PropertyInfo(discriminator="type")
+]
+
+
+class RunStepDelta(BaseModel):
+ step_details: Optional[StepDetails] = None
+ """The details of the run step."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py
new file mode 100644
index 00000000..7f3f92aa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+from .run_step_delta import RunStepDelta
+
+__all__ = ["RunStepDeltaEvent"]
+
+
+class RunStepDeltaEvent(BaseModel):
+ id: str
+ """The identifier of the run step, which can be referenced in API endpoints."""
+
+ delta: RunStepDelta
+ """The delta containing the fields that have changed on the run step."""
+
+ object: Literal["thread.run.step.delta"]
+ """The object type, which is always `thread.run.step.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py
new file mode 100644
index 00000000..f58ed3d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["RunStepDeltaMessageDelta", "MessageCreation"]
+
+
+class MessageCreation(BaseModel):
+ message_id: Optional[str] = None
+ """The ID of the message that was created by this run step."""
+
+
+class RunStepDeltaMessageDelta(BaseModel):
+ type: Literal["message_creation"]
+ """Always `message_creation`."""
+
+ message_creation: Optional[MessageCreation] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py
new file mode 100644
index 00000000..8e76c1b7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["RunStepInclude"]
+
+RunStepInclude: TypeAlias = Literal["step_details.tool_calls[*].file_search.results[*].content"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py
new file mode 100644
index 00000000..a6be771d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py
@@ -0,0 +1,56 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from .run_step_include import RunStepInclude
+
+__all__ = ["StepListParams"]
+
+
+class StepListParams(TypedDict, total=False):
+ thread_id: Required[str]
+
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ include: List[RunStepInclude]
+ """A list of additional fields to include in the response.
+
+ Currently the only supported value is
+ `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ for more information.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py
new file mode 100644
index 00000000..ecbb72ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+from .run_step_include import RunStepInclude
+
+__all__ = ["StepRetrieveParams"]
+
+
+class StepRetrieveParams(TypedDict, total=False):
+ thread_id: Required[str]
+
+ run_id: Required[str]
+
+ include: List[RunStepInclude]
+ """A list of additional fields to include in the response.
+
+ Currently the only supported value is
+ `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+ for more information.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py
new file mode 100644
index 00000000..565e3109
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from .function_tool_call import FunctionToolCall
+from .file_search_tool_call import FileSearchToolCall
+from .code_interpreter_tool_call import CodeInterpreterToolCall
+
+__all__ = ["ToolCall"]
+
+ToolCall: TypeAlias = Annotated[
+ Union[CodeInterpreterToolCall, FileSearchToolCall, FunctionToolCall], PropertyInfo(discriminator="type")
+]
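
Because every variant pins `type` to a distinct Literal, the discriminator narrows the union both for Pydantic at parse time and for static type checkers; a small sketch:

from openai.types.beta.threads.runs import ToolCall

def describe(call: ToolCall) -> str:
    if call.type == "code_interpreter":
        return f"code: {call.code_interpreter.input}"
    if call.type == "file_search":
        return f"file search call {call.id}"
    return f"function: {call.function.name}({call.function.arguments})"
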
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py
new file mode 100644
index 00000000..f0b8070c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from .function_tool_call_delta import FunctionToolCallDelta
+from .file_search_tool_call_delta import FileSearchToolCallDelta
+from .code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta
+
+__all__ = ["ToolCallDelta"]
+
+ToolCallDelta: TypeAlias = Annotated[
+ Union[CodeInterpreterToolCallDelta, FileSearchToolCallDelta, FunctionToolCallDelta],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py
new file mode 100644
index 00000000..189dce77
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+from .tool_call_delta import ToolCallDelta
+
+__all__ = ["ToolCallDeltaObject"]
+
+
+class ToolCallDeltaObject(BaseModel):
+ type: Literal["tool_calls"]
+ """Always `tool_calls`."""
+
+ tool_calls: Optional[List[ToolCallDelta]] = None
+ """An array of tool calls the run step was involved in.
+
+ These can be associated with one of three types of tools: `code_interpreter`,
+ `file_search`, or `function`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py
new file mode 100644
index 00000000..a084d387
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from .tool_call import ToolCall
+from ....._models import BaseModel
+
+__all__ = ["ToolCallsStepDetails"]
+
+
+class ToolCallsStepDetails(BaseModel):
+ tool_calls: List[ToolCall]
+ """An array of tool calls the run step was involved in.
+
+ These can be associated with one of three types of tools: `code_interpreter`,
+ `file_search`, or `function`.
+ """
+
+ type: Literal["tool_calls"]
+ """Always `tool_calls`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py
new file mode 100644
index 00000000..853bec29
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ...._models import BaseModel
+from .annotation import Annotation
+
+__all__ = ["Text"]
+
+
+class Text(BaseModel):
+ annotations: List[Annotation]
+
+ value: str
+ """The data that makes up the text."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py
new file mode 100644
index 00000000..3706d6b9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .text import Text
+from ...._models import BaseModel
+
+__all__ = ["TextContentBlock"]
+
+
+class TextContentBlock(BaseModel):
+ text: Text
+
+ type: Literal["text"]
+ """Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py
new file mode 100644
index 00000000..6313de32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["TextContentBlockParam"]
+
+
+class TextContentBlockParam(TypedDict, total=False):
+ text: Required[str]
+ """Text content to be sent to the model"""
+
+ type: Required[Literal["text"]]
+ """Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py
new file mode 100644
index 00000000..09cd3570
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from .annotation_delta import AnnotationDelta
+
+__all__ = ["TextDelta"]
+
+
+class TextDelta(BaseModel):
+ annotations: Optional[List[AnnotationDelta]] = None
+
+ value: Optional[str] = None
+ """The data that makes up the text."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py
new file mode 100644
index 00000000..586116e0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .text_delta import TextDelta
+
+__all__ = ["TextDeltaBlock"]
+
+
+class TextDeltaBlock(BaseModel):
+ index: int
+ """The index of the content part in the message."""
+
+ type: Literal["text"]
+ """Always `text`."""
+
+ text: Optional[TextDelta] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/chat/__init__.py
new file mode 100644
index 00000000..b4f43b29
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/__init__.py
@@ -0,0 +1,71 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .chat_completion import ChatCompletion as ChatCompletion
+from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
+from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .completion_list_params import CompletionListParams as CompletionListParams
+from .parsed_chat_completion import (
+ ParsedChoice as ParsedChoice,
+ ParsedChatCompletion as ParsedChatCompletion,
+ ParsedChatCompletionMessage as ParsedChatCompletionMessage,
+)
+from .chat_completion_deleted import ChatCompletionDeleted as ChatCompletionDeleted
+from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
+from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams
+from .parsed_function_tool_call import (
+ ParsedFunction as ParsedFunction,
+ ParsedFunctionToolCall as ParsedFunctionToolCall,
+)
+from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
+from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
+from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
+from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
+from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
+from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
+from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam
+from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam
+from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam
+from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam
+from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam
+from .chat_completion_function_message_param import (
+ ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,
+)
+from .chat_completion_assistant_message_param import (
+ ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,
+)
+from .chat_completion_content_part_text_param import (
+ ChatCompletionContentPartTextParam as ChatCompletionContentPartTextParam,
+)
+from .chat_completion_developer_message_param import (
+ ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,
+)
+from .chat_completion_message_tool_call_param import (
+ ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam,
+)
+from .chat_completion_named_tool_choice_param import (
+ ChatCompletionNamedToolChoiceParam as ChatCompletionNamedToolChoiceParam,
+)
+from .chat_completion_content_part_image_param import (
+ ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam,
+)
+from .chat_completion_prediction_content_param import (
+ ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam,
+)
+from .chat_completion_tool_choice_option_param import (
+ ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
+)
+from .chat_completion_content_part_refusal_param import (
+ ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam,
+)
+from .chat_completion_function_call_option_param import (
+ ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam,
+)
+from .chat_completion_content_part_input_audio_param import (
+ ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py
new file mode 100644
index 00000000..cb812a27
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py
@@ -0,0 +1,73 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..completion_usage import CompletionUsage
+from .chat_completion_message import ChatCompletionMessage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]
+
+
+class ChoiceLogprobs(BaseModel):
+ content: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message content tokens with log probability information."""
+
+ refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+ finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, `length` if the maximum number of tokens specified in the request was
+ reached, `content_filter` if content was omitted due to a flag from our content
+ filters, `tool_calls` if the model called a tool, or `function_call`
+ (deprecated) if the model called a function.
+ """
+
+ index: int
+ """The index of the choice in the list of choices."""
+
+ logprobs: Optional[ChoiceLogprobs] = None
+ """Log probability information for the choice."""
+
+ message: ChatCompletionMessage
+ """A chat completion message generated by the model."""
+
+
+class ChatCompletion(BaseModel):
+ id: str
+ """A unique identifier for the chat completion."""
+
+ choices: List[Choice]
+ """A list of chat completion choices.
+
+ Can be more than one if `n` is greater than 1.
+ """
+
+ created: int
+ """The Unix timestamp (in seconds) of when the chat completion was created."""
+
+ model: str
+ """The model used for the chat completion."""
+
+ object: Literal["chat.completion"]
+ """The object type, which is always `chat.completion`."""
+
+ service_tier: Optional[Literal["scale", "default"]] = None
+ """The service tier used for processing the request."""
+
+ system_fingerprint: Optional[str] = None
+ """This fingerprint represents the backend configuration that the model runs with.
+
+ Can be used in conjunction with the `seed` request parameter to understand when
+ backend changes have been made that might impact determinism.
+ """
+
+ usage: Optional[CompletionUsage] = None
+ """Usage statistics for the completion request."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py
new file mode 100644
index 00000000..35e3a3d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam
+from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam
+
+__all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"]
+
+
+class Audio(TypedDict, total=False):
+ id: Required[str]
+ """Unique identifier for a previous audio response from the model."""
+
+
+ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam]
+
+
+class FunctionCall(TypedDict, total=False):
+ arguments: Required[str]
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class ChatCompletionAssistantMessageParam(TypedDict, total=False):
+ role: Required[Literal["assistant"]]
+ """The role of the messages author, in this case `assistant`."""
+
+ audio: Optional[Audio]
+ """Data about a previous audio response from the model.
+
+ [Learn more](https://platform.openai.com/docs/guides/audio).
+ """
+
+ content: Union[str, Iterable[ContentArrayOfContentPart], None]
+ """The contents of the assistant message.
+
+ Required unless `tool_calls` or `function_call` is specified.
+ """
+
+ function_call: Optional[FunctionCall]
+ """Deprecated and replaced by `tool_calls`.
+
+ The name and arguments of a function that should be called, as generated by the
+ model.
+ """
+
+ name: str
+ """An optional name for the participant.
+
+ Provides the model information to differentiate between participants of the same
+ role.
+ """
+
+ refusal: Optional[str]
+ """The refusal message by the assistant."""
+
+ tool_calls: Iterable[ChatCompletionMessageToolCallParam]
+ """The tool calls generated by the model, such as function calls."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py
new file mode 100644
index 00000000..dd15508e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionAudio"]
+
+
+class ChatCompletionAudio(BaseModel):
+ id: str
+ """Unique identifier for this audio response."""
+
+ data: str
+ """
+ Base64 encoded audio bytes generated by the model, in the format specified in
+ the request.
+ """
+
+ expires_at: int
+ """
+ The Unix timestamp (in seconds) for when this audio response will no longer be
+ accessible on the server for use in multi-turn conversations.
+ """
+
+ transcript: str
+ """Transcript of the audio generated by the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py
new file mode 100644
index 00000000..63214178
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionAudioParam"]
+
+
+class ChatCompletionAudioParam(TypedDict, total=False):
+ format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
+ """Specifies the output audio format.
+
+ Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
+ """
+
+ voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
+ """The voice the model uses to respond.
+
+ Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
+ `shimmer`, and `verse`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py
new file mode 100644
index 00000000..31b9cb54
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py
@@ -0,0 +1,150 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..completion_usage import CompletionUsage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+ "ChatCompletionChunk",
+ "Choice",
+ "ChoiceDelta",
+ "ChoiceDeltaFunctionCall",
+ "ChoiceDeltaToolCall",
+ "ChoiceDeltaToolCallFunction",
+ "ChoiceLogprobs",
+]
+
+
+class ChoiceDeltaFunctionCall(BaseModel):
+ arguments: Optional[str] = None
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Optional[str] = None
+ """The name of the function to call."""
+
+
+class ChoiceDeltaToolCallFunction(BaseModel):
+ arguments: Optional[str] = None
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Optional[str] = None
+ """The name of the function to call."""
+
+
+class ChoiceDeltaToolCall(BaseModel):
+ index: int
+
+ id: Optional[str] = None
+ """The ID of the tool call."""
+
+ function: Optional[ChoiceDeltaToolCallFunction] = None
+
+ type: Optional[Literal["function"]] = None
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceDelta(BaseModel):
+ content: Optional[str] = None
+ """The contents of the chunk message."""
+
+ function_call: Optional[ChoiceDeltaFunctionCall] = None
+ """Deprecated and replaced by `tool_calls`.
+
+ The name and arguments of a function that should be called, as generated by the
+ model.
+ """
+
+ refusal: Optional[str] = None
+ """The refusal message generated by the model."""
+
+ role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None
+ """The role of the author of this message."""
+
+ tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
+
+
+class ChoiceLogprobs(BaseModel):
+ content: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message content tokens with log probability information."""
+
+ refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+ delta: ChoiceDelta
+ """A chat completion delta generated by streamed model responses."""
+
+ finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter", "function_call"]] = None
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, `length` if the maximum number of tokens specified in the request was
+ reached, `content_filter` if content was omitted due to a flag from our content
+ filters, `tool_calls` if the model called a tool, or `function_call`
+ (deprecated) if the model called a function.
+ """
+
+ index: int
+ """The index of the choice in the list of choices."""
+
+ logprobs: Optional[ChoiceLogprobs] = None
+ """Log probability information for the choice."""
+
+
+class ChatCompletionChunk(BaseModel):
+ id: str
+ """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+ choices: List[Choice]
+ """A list of chat completion choices.
+
+ Can contain more than one element if `n` is greater than 1. Can also be empty
+ for the last chunk if you set `stream_options: {"include_usage": true}`.
+ """
+
+ created: int
+ """The Unix timestamp (in seconds) of when the chat completion was created.
+
+ Each chunk has the same timestamp.
+ """
+
+ model: str
+ """The model to generate the completion."""
+
+ object: Literal["chat.completion.chunk"]
+ """The object type, which is always `chat.completion.chunk`."""
+
+ service_tier: Optional[Literal["scale", "default"]] = None
+ """The service tier used for processing the request."""
+
+ system_fingerprint: Optional[str] = None
+ """
+ This fingerprint represents the backend configuration that the model runs with.
+ Can be used in conjunction with the `seed` request parameter to understand when
+ backend changes have been made that might impact determinism.
+ """
+
+ usage: Optional[CompletionUsage] = None
+ """
+ An optional field that will only be present when you set
+ `stream_options: {"include_usage": true}` in your request. When present, it
+ contains a null value **except for the last chunk** which contains the token
+ usage statistics for the entire request.
+
+ **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+ final usage chunk which contains the total token usage for the request.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py
new file mode 100644
index 00000000..9d407324
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartImageParam", "ImageURL"]
+
+
+class ImageURL(TypedDict, total=False):
+ url: Required[str]
+ """Either a URL of the image or the base64 encoded image data."""
+
+ detail: Literal["auto", "low", "high"]
+ """Specifies the detail level of the image.
+
+ Learn more in the
+ [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
+ """
+
+
+class ChatCompletionContentPartImageParam(TypedDict, total=False):
+ image_url: Required[ImageURL]
+
+ type: Required[Literal["image_url"]]
+ """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py
new file mode 100644
index 00000000..0b1b1a80
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"]
+
+
+class InputAudio(TypedDict, total=False):
+ data: Required[str]
+ """Base64 encoded audio data."""
+
+ format: Required[Literal["wav", "mp3"]]
+ """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
+
+
+class ChatCompletionContentPartInputAudioParam(TypedDict, total=False):
+ input_audio: Required[InputAudio]
+
+ type: Required[Literal["input_audio"]]
+ """The type of the content part. Always `input_audio`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py
new file mode 100644
index 00000000..cbedc853
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam
+from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam
+
+__all__ = ["ChatCompletionContentPartParam", "File", "FileFile"]
+
+
+class FileFile(TypedDict, total=False):
+ file_data: str
+ """
+ The base64 encoded file data, used when passing the file to the model as a
+ string.
+ """
+
+ file_id: str
+ """The ID of an uploaded file to use as input."""
+
+ filename: str
+ """The name of the file, used when passing the file to the model as a string."""
+
+
+class File(TypedDict, total=False):
+ file: Required[FileFile]
+
+ type: Required[Literal["file"]]
+ """The type of the content part. Always `file`."""
+
+
+ChatCompletionContentPartParam: TypeAlias = Union[
+ ChatCompletionContentPartTextParam,
+ ChatCompletionContentPartImageParam,
+ ChatCompletionContentPartInputAudioParam,
+ File,
+]
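
A sketch of one user message mixing these content-part variants (the URL and audio bytes are placeholders, and image or audio inputs each require a model that supports them):

import base64

user_content = [
    {"type": "text", "text": "Describe the image and transcribe the clip."},
    {"type": "image_url", "image_url": {"url": "https://example.com/cat.png", "detail": "low"}},
    {
        "type": "input_audio",
        "input_audio": {"data": base64.b64encode(b"...").decode(), "format": "wav"},
    },
]
messages = [{"role": "user", "content": user_content}]
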
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py
new file mode 100644
index 00000000..c18c7db7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartRefusalParam"]
+
+
+class ChatCompletionContentPartRefusalParam(TypedDict, total=False):
+ refusal: Required[str]
+ """The refusal message generated by the model."""
+
+ type: Required[Literal["refusal"]]
+ """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py
new file mode 100644
index 00000000..a2707444
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartTextParam"]
+
+
+class ChatCompletionContentPartTextParam(TypedDict, total=False):
+ text: Required[str]
+ """The text content."""
+
+ type: Required[Literal["text"]]
+ """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py
new file mode 100644
index 00000000..0a541cb2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionDeleted"]
+
+
+class ChatCompletionDeleted(BaseModel):
+ id: str
+ """The ID of the chat completion that was deleted."""
+
+ deleted: bool
+ """Whether the chat completion was deleted."""
+
+ object: Literal["chat.completion.deleted"]
+ """The type of object being deleted."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py
new file mode 100644
index 00000000..01e4fdb6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionDeveloperMessageParam"]
+
+
+class ChatCompletionDeveloperMessageParam(TypedDict, total=False):
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+ """The contents of the developer message."""
+
+ role: Required[Literal["developer"]]
+ """The role of the messages author, in this case `developer`."""
+
+ name: str
+ """An optional name for the participant.
+
+ Provides the model information to differentiate between participants of the same
+ role.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py
new file mode 100644
index 00000000..2bc014af
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ChatCompletionFunctionCallOptionParam"]
+
+
+class ChatCompletionFunctionCallOptionParam(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py
new file mode 100644
index 00000000..5af12bf9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionFunctionMessageParam"]
+
+
+class ChatCompletionFunctionMessageParam(TypedDict, total=False):
+ content: Required[Optional[str]]
+ """The contents of the function message."""
+
+ name: Required[str]
+ """The name of the function to call."""
+
+ role: Required[Literal["function"]]
+ """The role of the messages author, in this case `function`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py
new file mode 100644
index 00000000..c659ac3d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py
@@ -0,0 +1,79 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .chat_completion_audio import ChatCompletionAudio
+from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
+
+__all__ = ["ChatCompletionMessage", "Annotation", "AnnotationURLCitation", "FunctionCall"]
+
+
+class AnnotationURLCitation(BaseModel):
+ end_index: int
+ """The index of the last character of the URL citation in the message."""
+
+ start_index: int
+ """The index of the first character of the URL citation in the message."""
+
+ title: str
+ """The title of the web resource."""
+
+ url: str
+ """The URL of the web resource."""
+
+
+class Annotation(BaseModel):
+ type: Literal["url_citation"]
+ """The type of the URL citation. Always `url_citation`."""
+
+ url_citation: AnnotationURLCitation
+ """A URL citation when using web search."""
+
+
+class FunctionCall(BaseModel):
+ arguments: str
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: str
+ """The name of the function to call."""
+
+
+class ChatCompletionMessage(BaseModel):
+ content: Optional[str] = None
+ """The contents of the message."""
+
+ refusal: Optional[str] = None
+ """The refusal message generated by the model."""
+
+ role: Literal["assistant"]
+ """The role of the author of this message."""
+
+ annotations: Optional[List[Annotation]] = None
+ """
+ Annotations for the message, when applicable, as when using the
+ [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ """
+
+ audio: Optional[ChatCompletionAudio] = None
+ """
+ If the audio output modality is requested, this object contains data about the
+ audio response from the model.
+ [Learn more](https://platform.openai.com/docs/guides/audio).
+ """
+
+ function_call: Optional[FunctionCall] = None
+ """Deprecated and replaced by `tool_calls`.
+
+ The name and arguments of a function that should be called, as generated by the
+ model.
+ """
+
+ tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
+ """The tool calls generated by the model, such as function calls."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py
new file mode 100644
index 00000000..942da243
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .chat_completion_tool_message_param import ChatCompletionToolMessageParam
+from .chat_completion_user_message_param import ChatCompletionUserMessageParam
+from .chat_completion_system_message_param import ChatCompletionSystemMessageParam
+from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam
+from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
+from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam
+
+__all__ = ["ChatCompletionMessageParam"]
+
+ChatCompletionMessageParam: TypeAlias = Union[
+ ChatCompletionDeveloperMessageParam,
+ ChatCompletionSystemMessageParam,
+ ChatCompletionUserMessageParam,
+ ChatCompletionAssistantMessageParam,
+ ChatCompletionToolMessageParam,
+ ChatCompletionFunctionMessageParam,
+]
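
A sketch of a conversation list mixing the variants of this union:

from openai.types.chat import ChatCompletionMessageParam

messages: list[ChatCompletionMessageParam] = [
    {"role": "developer", "content": "Answer tersely."},
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4."},
    {"role": "user", "content": "And doubled?"},
]
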
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py
new file mode 100644
index 00000000..4fec6670
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionMessageToolCall", "Function"]
+
+
+class Function(BaseModel):
+ arguments: str
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: str
+ """The name of the function to call."""
+
+
+class ChatCompletionMessageToolCall(BaseModel):
+ id: str
+ """The ID of the tool call."""
+
+ function: Function
+ """The function that the model called."""
+
+ type: Literal["function"]
+ """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py
new file mode 100644
index 00000000..f616c363
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionMessageToolCallParam", "Function"]
+
+
+class Function(TypedDict, total=False):
+ arguments: Required[str]
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class ChatCompletionMessageToolCallParam(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the tool call."""
+
+ function: Required[Function]
+ """The function that the model called."""
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py
new file mode 100644
index 00000000..8e3c1459
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatCompletionModality"]
+
+ChatCompletionModality: TypeAlias = Literal["text", "audio"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py
new file mode 100644
index 00000000..369f8b42
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionNamedToolChoiceParam", "Function"]
+
+
+class Function(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class ChatCompletionNamedToolChoiceParam(TypedDict, total=False):
+ function: Required[Function]
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py
new file mode 100644
index 00000000..c44e6e36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionPredictionContentParam"]
+
+
+class ChatCompletionPredictionContentParam(TypedDict, total=False):
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+ """
+ The content that should be matched when generating a model response. If
+ generated tokens would match this content, the entire model response can be
+ returned much more quickly.
+ """
+
+ type: Required[Literal["content"]]
+ """The type of the predicted content you want to provide.
+
+ This type is currently always `content`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py
new file mode 100644
index 00000000..e4785c90
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..shared.reasoning_effort import ReasoningEffort
+
+__all__ = ["ChatCompletionReasoningEffort"]
+
+ChatCompletionReasoningEffort = ReasoningEffort
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py
new file mode 100644
index 00000000..3ec5e9ad
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatCompletionRole"]
+
+ChatCompletionRole: TypeAlias = Literal["developer", "system", "user", "assistant", "tool", "function"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py
new file mode 100644
index 00000000..95adc08a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .chat_completion_message import ChatCompletionMessage
+
+__all__ = ["ChatCompletionStoreMessage"]
+
+
+class ChatCompletionStoreMessage(ChatCompletionMessage):
+ id: str
+ """The identifier of the chat message."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py
new file mode 100644
index 00000000..471e0eba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ChatCompletionStreamOptionsParam"]
+
+
+class ChatCompletionStreamOptionsParam(TypedDict, total=False):
+ include_usage: bool
+ """If set, an additional chunk will be streamed before the `data: [DONE]` message.
+
+ The `usage` field on this chunk shows the token usage statistics for the entire
+ request, and the `choices` field will always be an empty array.
+
+ All other chunks will also include a `usage` field, but with a null value.
+ **NOTE:** If the stream is interrupted, you may not receive the final usage
+ chunk which contains the total token usage for the request.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py
new file mode 100644
index 00000000..172ccea0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionSystemMessageParam"]
+
+
+class ChatCompletionSystemMessageParam(TypedDict, total=False):
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+ """The contents of the system message."""
+
+ role: Required[Literal["system"]]
+ """The role of the messages author, in this case `system`."""
+
+ name: str
+ """An optional name for the participant.
+
+ Provides the model information to differentiate between participants of the same
+ role.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py
new file mode 100644
index 00000000..c69e2589
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"]
+
+
+class TopLogprob(BaseModel):
+ token: str
+ """The token."""
+
+ bytes: Optional[List[int]] = None
+ """A list of integers representing the UTF-8 bytes representation of the token.
+
+ Useful in instances where characters are represented by multiple tokens and
+ their byte representations must be combined to generate the correct text
+ representation. Can be `null` if there is no bytes representation for the token.
+ """
+
+ logprob: float
+ """The log probability of this token, if it is within the top 20 most likely
+ tokens.
+
+ Otherwise, the value `-9999.0` is used to signify that the token is very
+ unlikely.
+ """
+
+
+class ChatCompletionTokenLogprob(BaseModel):
+ token: str
+ """The token."""
+
+ bytes: Optional[List[int]] = None
+ """A list of integers representing the UTF-8 bytes representation of the token.
+
+ Useful in instances where characters are represented by multiple tokens and
+ their byte representations must be combined to generate the correct text
+ representation. Can be `null` if there is no bytes representation for the token.
+ """
+
+ logprob: float
+ """The log probability of this token, if it is within the top 20 most likely
+ tokens.
+
+ Otherwise, the value `-9999.0` is used to signify that the token is very
+ unlikely.
+ """
+
+ top_logprobs: List[TopLogprob]
+ """List of the most likely tokens and their log probability, at this token
+ position.
+
+ In rare cases, there may be fewer than the number of requested `top_logprobs`
+ returned.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py
new file mode 100644
index 00000000..7dedf041
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam
+
+__all__ = ["ChatCompletionToolChoiceOptionParam"]
+
+ChatCompletionToolChoiceOptionParam: TypeAlias = Union[
+ Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py
new file mode 100644
index 00000000..eb5e270e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionToolMessageParam"]
+
+
+class ChatCompletionToolMessageParam(TypedDict, total=False):
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+ """The contents of the tool message."""
+
+ role: Required[Literal["tool"]]
+ """The role of the messages author, in this case `tool`."""
+
+ tool_call_id: Required[str]
+ """Tool call that this message is responding to."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py
new file mode 100644
index 00000000..6c2b1a36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.function_definition import FunctionDefinition
+
+__all__ = ["ChatCompletionToolParam"]
+
+
+class ChatCompletionToolParam(TypedDict, total=False):
+ function: Required[FunctionDefinition]
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py
new file mode 100644
index 00000000..5c15322a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_param import ChatCompletionContentPartParam
+
+__all__ = ["ChatCompletionUserMessageParam"]
+
+
+class ChatCompletionUserMessageParam(TypedDict, total=False):
+ content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]]
+ """The contents of the user message."""
+
+ role: Required[Literal["user"]]
+ """The role of the messages author, in this case `user`."""
+
+ name: str
+ """An optional name for the participant.
+
+ Provides the model information to differentiate between participants of the same
+ role.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py
new file mode 100644
index 00000000..05103fba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py
@@ -0,0 +1,404 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared.chat_model import ChatModel
+from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from .chat_completion_tool_param import ChatCompletionToolParam
+from .chat_completion_audio_param import ChatCompletionAudioParam
+from .chat_completion_message_param import ChatCompletionMessageParam
+from ..shared_params.function_parameters import FunctionParameters
+from ..shared_params.response_format_text import ResponseFormatText
+from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from .chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
+from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
+from .chat_completion_function_call_option_param import ChatCompletionFunctionCallOptionParam
+
+__all__ = [
+ "CompletionCreateParamsBase",
+ "FunctionCall",
+ "Function",
+ "ResponseFormat",
+ "WebSearchOptions",
+ "WebSearchOptionsUserLocation",
+ "WebSearchOptionsUserLocationApproximate",
+ "CompletionCreateParamsNonStreaming",
+ "CompletionCreateParamsStreaming",
+]
+
+
+class CompletionCreateParamsBase(TypedDict, total=False):
+ messages: Required[Iterable[ChatCompletionMessageParam]]
+ """A list of messages comprising the conversation so far.
+
+ Depending on the [model](https://platform.openai.com/docs/models) you use,
+ different message types (modalities) are supported, like
+ [text](https://platform.openai.com/docs/guides/text-generation),
+ [images](https://platform.openai.com/docs/guides/vision), and
+ [audio](https://platform.openai.com/docs/guides/audio).
+ """
+
+ model: Required[Union[str, ChatModel]]
+ """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+ OpenAI offers a wide range of models with different capabilities, performance
+ characteristics, and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+ """
+
+ audio: Optional[ChatCompletionAudioParam]
+ """Parameters for audio output.
+
+ Required when audio output is requested with `modalities: ["audio"]`.
+ [Learn more](https://platform.openai.com/docs/guides/audio).
+ """
+
+ frequency_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on their existing frequency in the
+ text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ """
+
+ function_call: FunctionCall
+ """Deprecated in favor of `tool_choice`.
+
+ Controls which (if any) function is called by the model.
+
+ `none` means the model will not call a function and instead generates a message.
+
+ `auto` means the model can pick between generating a message or calling a
+ function.
+
+ Specifying a particular function via `{"name": "my_function"}` forces the model
+ to call that function.
+
+ `none` is the default when no functions are present. `auto` is the default if
+ functions are present.
+ """
+
+ functions: Iterable[Function]
+ """Deprecated in favor of `tools`.
+
+ A list of functions the model may generate JSON inputs for.
+ """
+
+ logit_bias: Optional[Dict[str, int]]
+ """Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+ """
+
+ logprobs: Optional[bool]
+ """Whether to return log probabilities of the output tokens or not.
+
+ If true, returns the log probabilities of each output token returned in the
+ `content` of `message`.
+ """
+
+ max_completion_tokens: Optional[int]
+ """
+ An upper bound for the number of tokens that can be generated for a completion,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ max_tokens: Optional[int]
+ """
+ The maximum number of [tokens](/tokenizer) that can be generated in the chat
+ completion. This value can be used to control
+ [costs](https://openai.com/api/pricing/) for text generated via API.
+
+ This value is now deprecated in favor of `max_completion_tokens`, and is not
+ compatible with
+ [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]]
+ """
+ Output types that you would like the model to generate. Most models are capable
+ of generating text, which is the default:
+
+ `["text"]`
+
+ The `gpt-4o-audio-preview` model can also be used to
+ [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ this model generate both text and audio responses, you can use:
+
+ `["text", "audio"]`
+ """
+
+ n: Optional[int]
+ """How many chat completion choices to generate for each input message.
+
+ Note that you will be charged based on the number of generated tokens across all
+ of the choices. Keep `n` as `1` to minimize costs.
+ """
+
+ parallel_tool_calls: bool
+ """
+ Whether to enable
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+ during tool use.
+ """
+
+ prediction: Optional[ChatCompletionPredictionContentParam]
+ """
+ Static predicted output content, such as the content of a text file that is
+ being regenerated.
+ """
+
+ presence_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on whether they appear in the text so
+ far, increasing the model's likelihood to talk about new topics.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """**o-series models only**
+
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ result in faster responses and fewer tokens used on reasoning in a response.
+ """
+
+ response_format: ResponseFormat
+ """An object specifying the format that the model must output.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
+ in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ ensures the message the model generates is valid JSON. Using `json_schema` is
+ preferred for models that support it.
+ """
+
+ seed: Optional[int]
+ """
+ This feature is in Beta. If specified, our system will make a best effort to
+ sample deterministically, such that repeated requests with the same `seed` and
+ parameters should return the same result. Determinism is not guaranteed, and you
+ should refer to the `system_fingerprint` response parameter to monitor changes
+ in the backend.
+ """
+
+ service_tier: Optional[Literal["auto", "default"]]
+ """Specifies the latency tier to use for processing the request.
+
+ This parameter is relevant for customers subscribed to the scale tier service:
+
+ - If set to 'auto', and the Project is Scale tier enabled, the system will
+ utilize scale tier credits until they are exhausted.
+ - If set to 'auto', and the Project is not Scale tier enabled, the request will
+ be processed using the default service tier with a lower uptime SLA and no
+ latency guarantee.
+ - If set to 'default', the request will be processed using the default service
+ tier with a lower uptime SLA and no latency guarantee.
+ - When not set, the default behavior is 'auto'.
+
+ When this parameter is set, the response body will include the `service_tier`
+ utilized.
+ """
+
+ stop: Union[Optional[str], List[str], None]
+ """Up to 4 sequences where the API will stop generating further tokens.
+
+ The returned text will not contain the stop sequence.
+ """
+
+ store: Optional[bool]
+ """
+ Whether or not to store the output of this chat completion request for use in
+ our [model distillation](https://platform.openai.com/docs/guides/distillation)
+ or [evals](https://platform.openai.com/docs/guides/evals) products.
+ """
+
+ stream_options: Optional[ChatCompletionStreamOptionsParam]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ tool_choice: ChatCompletionToolChoiceOptionParam
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+ """
+
+ tools: Iterable[ChatCompletionToolParam]
+ """A list of tools the model may call.
+
+ Currently, only functions are supported as a tool. Use this to provide a list of
+ functions the model may generate JSON inputs for. A max of 128 functions are
+ supported.
+ """
+
+ top_logprobs: Optional[int]
+ """
+ An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
+
+ web_search_options: WebSearchOptions
+ """
+ This tool searches the web for relevant results to use in a response. Learn more
+ about the
+ [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ """
+
+
+FunctionCall: TypeAlias = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam]
+
+
+class Function(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: str
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ parameters: FunctionParameters
+ """The parameters the functions accepts, described as a JSON Schema object.
+
+ See the [guide](https://platform.openai.com/docs/guides/function-calling) for
+ examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+
+ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
+
+
+class WebSearchOptionsUserLocationApproximate(TypedDict, total=False):
+ city: str
+ """Free text input for the city of the user, e.g. `San Francisco`."""
+
+ country: str
+ """
+ The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+ the user, e.g. `US`.
+ """
+
+ region: str
+ """Free text input for the region of the user, e.g. `California`."""
+
+ timezone: str
+ """
+ The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+ user, e.g. `America/Los_Angeles`.
+ """
+
+
+class WebSearchOptionsUserLocation(TypedDict, total=False):
+ approximate: Required[WebSearchOptionsUserLocationApproximate]
+ """Approximate location parameters for the search."""
+
+ type: Required[Literal["approximate"]]
+ """The type of location approximation. Always `approximate`."""
+
+
+class WebSearchOptions(TypedDict, total=False):
+ search_context_size: Literal["low", "medium", "high"]
+ """
+ High level guidance for the amount of context window space to use for the
+ search. One of `low`, `medium`, or `high`. `medium` is the default.
+ """
+
+ user_location: Optional[WebSearchOptionsUserLocation]
+ """Approximate location parameters for the search."""
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+ for more information, along with the
+ [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+ guide on how to handle the streaming events.
+ """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+ for more information, along with the
+ [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+ guide on how to handle the streaming events.
+ """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
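
These TypedDicts mirror the keyword arguments of `client.chat.completions.create(...)`; the `stream` key discriminates the non-streaming and streaming variants. A minimal sketch, assuming `OPENAI_API_KEY` is set in the environment:

    from openai import OpenAI

    client = OpenAI()

    # CompletionCreateParamsNonStreaming: stream omitted or False
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello."}],
        max_completion_tokens=32,
    )
    print(resp.choices[0].message.content)

    # CompletionCreateParamsStreaming: stream=True is required
    for chunk in client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Count to three."}],
        stream=True,
    ):
        print(chunk.choices[0].delta.content or "", end="")
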
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py
new file mode 100644
index 00000000..d93da834
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = ["CompletionListParams"]
+
+
+class CompletionListParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last chat completion from the previous pagination request."""
+
+ limit: int
+ """Number of Chat Completions to retrieve."""
+
+ metadata: Optional[Metadata]
+ """A list of metadata keys to filter the Chat Completions by. Example:
+
+ `metadata[key1]=value1&metadata[key2]=value2`
+ """
+
+ model: str
+ """The model used to generate the Chat Completions."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for Chat Completions by timestamp.
+
+ Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py
new file mode 100644
index 00000000..fc71733f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = ["CompletionUpdateParams"]
+
+
+class CompletionUpdateParams(TypedDict, total=False):
+ metadata: Required[Optional[Metadata]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py
new file mode 100644
index 00000000..b8e62d6a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py
@@ -0,0 +1,5 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .message_list_params import MessageListParams as MessageListParams
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py
new file mode 100644
index 00000000..4e694e83
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["MessageListParams"]
+
+
+class MessageListParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last message from the previous pagination request."""
+
+ limit: int
+ """Number of messages to retrieve."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for messages by timestamp.
+
+ Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py
new file mode 100644
index 00000000..4b11dac5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Generic, TypeVar, Optional
+
+from ..._models import GenericModel
+from .chat_completion import Choice, ChatCompletion
+from .chat_completion_message import ChatCompletionMessage
+from .parsed_function_tool_call import ParsedFunctionToolCall
+
+__all__ = ["ParsedChatCompletion", "ParsedChoice"]
+
+
+ContentType = TypeVar("ContentType")
+
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedChatCompletionMessage(ChatCompletionMessage, GenericModel, Generic[ContentType]):
+ parsed: Optional[ContentType] = None
+ """The auto-parsed message contents"""
+
+ tool_calls: Optional[List[ParsedFunctionToolCall]] = None # type: ignore[assignment]
+ """The tool calls generated by the model, such as function calls."""
+
+
+class ParsedChoice(Choice, GenericModel, Generic[ContentType]):
+ message: ParsedChatCompletionMessage[ContentType]
+ """A chat completion message generated by the model."""
+
+
+class ParsedChatCompletion(ChatCompletion, GenericModel, Generic[ContentType]):
+ choices: List[ParsedChoice[ContentType]] # type: ignore[assignment]
+ """A list of chat completion choices.
+
+ Can be more than one if `n` is greater than 1.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py
new file mode 100644
index 00000000..3e90789f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall
+
+__all__ = ["ParsedFunctionToolCall", "ParsedFunction"]
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedFunction(Function):
+ parsed_arguments: Optional[object] = None
+ """
+ The arguments to call the function with.
+
+ If you used `openai.pydantic_function_tool()` then this will be an
+ instance of the given `BaseModel`.
+
+ Otherwise, this will be the parsed JSON arguments.
+ """
+
+
+class ParsedFunctionToolCall(ChatCompletionMessageToolCall):
+ function: ParsedFunction
+ """The function that the model called."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat_model.py b/.venv/lib/python3.12/site-packages/openai/types/chat_model.py
new file mode 100644
index 00000000..9304d195
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat_model.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .shared import chat_model
+
+__all__ = ["ChatModel"]
+
+ChatModel = chat_model.ChatModel
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion.py b/.venv/lib/python3.12/site-packages/openai/types/completion.py
new file mode 100644
index 00000000..d3b3102a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .completion_usage import CompletionUsage
+from .completion_choice import CompletionChoice
+
+__all__ = ["Completion"]
+
+
+class Completion(BaseModel):
+ id: str
+ """A unique identifier for the completion."""
+
+ choices: List[CompletionChoice]
+ """The list of completion choices the model generated for the input prompt."""
+
+ created: int
+ """The Unix timestamp (in seconds) of when the completion was created."""
+
+ model: str
+ """The model used for completion."""
+
+ object: Literal["text_completion"]
+ """The object type, which is always "text_completion" """
+
+ system_fingerprint: Optional[str] = None
+ """This fingerprint represents the backend configuration that the model runs with.
+
+ Can be used in conjunction with the `seed` request parameter to understand when
+ backend changes have been made that might impact determinism.
+ """
+
+ usage: Optional[CompletionUsage] = None
+ """Usage statistics for the completion request."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion_choice.py b/.venv/lib/python3.12/site-packages/openai/types/completion_choice.py
new file mode 100644
index 00000000..d948ebc9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion_choice.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["CompletionChoice", "Logprobs"]
+
+
+class Logprobs(BaseModel):
+ text_offset: Optional[List[int]] = None
+
+ token_logprobs: Optional[List[float]] = None
+
+ tokens: Optional[List[str]] = None
+
+ top_logprobs: Optional[List[Dict[str, float]]] = None
+
+
+class CompletionChoice(BaseModel):
+ finish_reason: Literal["stop", "length", "content_filter"]
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, `length` if the maximum number of tokens specified in the request was
+ reached, or `content_filter` if content was omitted due to a flag from our
+ content filters.
+ """
+
+ index: int
+
+ logprobs: Optional[Logprobs] = None
+
+ text: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/completion_create_params.py
new file mode 100644
index 00000000..fdb1680d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion_create_params.py
@@ -0,0 +1,187 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+
+__all__ = ["CompletionCreateParamsBase", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming"]
+
+
+class CompletionCreateParamsBase(TypedDict, total=False):
+ model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
+ """ID of the model to use.
+
+ You can use the
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ see all of your available models, or see our
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
+ """
+
+ prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]]
+ """
+ The prompt(s) to generate completions for, encoded as a string, array of
+ strings, array of tokens, or array of token arrays.
+
+ Note that <|endoftext|> is the document separator that the model sees during
+ training, so if a prompt is not specified the model will generate as if from the
+ beginning of a new document.
+ """
+
+ best_of: Optional[int]
+ """
+ Generates `best_of` completions server-side and returns the "best" (the one with
+ the highest log probability per token). Results cannot be streamed.
+
+ When used with `n`, `best_of` controls the number of candidate completions and
+ `n` specifies how many to return – `best_of` must be greater than `n`.
+
+ **Note:** Because this parameter generates many completions, it can quickly
+ consume your token quota. Use carefully and ensure that you have reasonable
+ settings for `max_tokens` and `stop`.
+ """
+
+ echo: Optional[bool]
+ """Echo back the prompt in addition to the completion"""
+
+ frequency_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on their existing frequency in the
+ text so far, decreasing the model's likelihood to repeat the same line verbatim.
+
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+ """
+
+ logit_bias: Optional[Dict[str, int]]
+ """Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+ tokenizer) to an associated bias value from -100 to 100. You can use this
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+ from being generated.
+ """
+
+ logprobs: Optional[int]
+ """
+ Include the log probabilities on the `logprobs` most likely output tokens, as
+ well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+ list of the 5 most likely tokens. The API will always return the `logprob` of
+ the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+ The maximum value for `logprobs` is 5.
+ """
+
+ max_tokens: Optional[int]
+ """
+ The maximum number of [tokens](/tokenizer) that can be generated in the
+ completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+ [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+ for counting tokens.
+ """
+
+ n: Optional[int]
+ """How many completions to generate for each prompt.
+
+ **Note:** Because this parameter generates many completions, it can quickly
+ consume your token quota. Use carefully and ensure that you have reasonable
+ settings for `max_tokens` and `stop`.
+ """
+
+ presence_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on whether they appear in the text so
+ far, increasing the model's likelihood to talk about new topics.
+
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+ """
+
+ seed: Optional[int]
+ """
+ If specified, our system will make a best effort to sample deterministically,
+ such that repeated requests with the same `seed` and parameters should return
+ the same result.
+
+ Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+ response parameter to monitor changes in the backend.
+ """
+
+ stop: Union[Optional[str], List[str], None]
+ """Up to 4 sequences where the API will stop generating further tokens.
+
+ The returned text will not contain the stop sequence.
+ """
+
+ stream_options: Optional[ChatCompletionStreamOptionsParam]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ suffix: Optional[str]
+ """The suffix that comes after a completion of inserted text.
+
+ This parameter is only supported for `gpt-3.5-turbo-instruct`.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+
+ We generally recommend altering this or `top_p` but not both.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """Whether to stream back partial progress.
+
+ If set, tokens will be sent as data-only
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ as they become available, with the stream terminated by a `data: [DONE]`
+ message.
+ [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+ """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+ stream: Required[Literal[True]]
+ """Whether to stream back partial progress.
+
+ If set, tokens will be sent as data-only
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ as they become available, with the stream terminated by a `data: [DONE]`
+ message.
+ [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+ """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
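
These params map onto the legacy Completions endpoint. A sketch, using one of the models the `model` literal above allows and a small `logprobs` value (the docstring caps it at 5):

    from openai import OpenAI

    client = OpenAI()

    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test",
        max_tokens=7,
        logprobs=2,  # per-token log probabilities for the 2 most likely tokens
    )
    choice = completion.choices[0]
    print(choice.text, choice.finish_reason)
    if choice.logprobs:
        print(choice.logprobs.tokens, choice.logprobs.token_logprobs)
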
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion_usage.py b/.venv/lib/python3.12/site-packages/openai/types/completion_usage.py
new file mode 100644
index 00000000..d8c4e84c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion_usage.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"]
+
+
+class CompletionTokensDetails(BaseModel):
+ accepted_prediction_tokens: Optional[int] = None
+ """
+ When using Predicted Outputs, the number of tokens in the prediction that
+ appeared in the completion.
+ """
+
+ audio_tokens: Optional[int] = None
+ """Audio input tokens generated by the model."""
+
+ reasoning_tokens: Optional[int] = None
+ """Tokens generated by the model for reasoning."""
+
+ rejected_prediction_tokens: Optional[int] = None
+ """
+ When using Predicted Outputs, the number of tokens in the prediction that did
+ not appear in the completion. However, like reasoning tokens, these tokens are
+ still counted in the total completion tokens for purposes of billing, output,
+ and context window limits.
+ """
+
+
+class PromptTokensDetails(BaseModel):
+ audio_tokens: Optional[int] = None
+ """Audio input tokens present in the prompt."""
+
+ cached_tokens: Optional[int] = None
+ """Cached tokens present in the prompt."""
+
+
+class CompletionUsage(BaseModel):
+ completion_tokens: int
+ """Number of tokens in the generated completion."""
+
+ prompt_tokens: int
+ """Number of tokens in the prompt."""
+
+ total_tokens: int
+ """Total number of tokens used in the request (prompt + completion)."""
+
+ completion_tokens_details: Optional[CompletionTokensDetails] = None
+ """Breakdown of tokens used in a completion."""
+
+ prompt_tokens_details: Optional[PromptTokensDetails] = None
+ """Breakdown of tokens used in the prompt."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py b/.venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py
new file mode 100644
index 00000000..eff247a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .embedding import Embedding
+
+__all__ = ["CreateEmbeddingResponse", "Usage"]
+
+
+class Usage(BaseModel):
+ prompt_tokens: int
+ """The number of tokens used by the prompt."""
+
+ total_tokens: int
+ """The total number of tokens used by the request."""
+
+
+class CreateEmbeddingResponse(BaseModel):
+ data: List[Embedding]
+ """The list of embeddings generated by the model."""
+
+ model: str
+ """The name of the model used to generate the embedding."""
+
+ object: Literal["list"]
+ """The object type, which is always "list"."""
+
+ usage: Usage
+ """The usage information for the request."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/embedding.py b/.venv/lib/python3.12/site-packages/openai/types/embedding.py
new file mode 100644
index 00000000..769b1d16
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/embedding.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Embedding"]
+
+
+class Embedding(BaseModel):
+ embedding: List[float]
+ """The embedding vector, which is a list of floats.
+
+ The length of the vector depends on the model as listed in the
+ [embedding guide](https://platform.openai.com/docs/guides/embeddings).
+ """
+
+ index: int
+ """The index of the embedding in the list of embeddings."""
+
+ object: Literal["embedding"]
+ """The object type, which is always "embedding"."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py
new file mode 100644
index 00000000..a9056644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py
@@ -0,0 +1,53 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .embedding_model import EmbeddingModel
+
+__all__ = ["EmbeddingCreateParams"]
+
+
+class EmbeddingCreateParams(TypedDict, total=False):
+ input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]]
+ """Input text to embed, encoded as a string or array of tokens.
+
+ To embed multiple inputs in a single request, pass an array of strings or array
+ of token arrays. The input must not exceed the max input tokens for the model
+ (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any
+ array must be 2048 dimensions or less.
+ [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+ for counting tokens. Some models may also impose a limit on total number of
+ tokens summed across inputs.
+ """
+
+ model: Required[Union[str, EmbeddingModel]]
+ """ID of the model to use.
+
+ You can use the
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ see all of your available models, or see our
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
+ """
+
+ dimensions: int
+ """The number of dimensions the resulting output embeddings should have.
+
+ Only supported in `text-embedding-3` and later models.
+ """
+
+ encoding_format: Literal["float", "base64"]
+ """The format to return the embeddings in.
+
+ Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/embedding_model.py b/.venv/lib/python3.12/site-packages/openai/types/embedding_model.py
new file mode 100644
index 00000000..075ff976
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/embedding_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["EmbeddingModel"]
+
+EmbeddingModel: TypeAlias = Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py
new file mode 100644
index 00000000..ee96bd78
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject
+from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject
+
+__all__ = ["FileChunkingStrategy"]
+
+FileChunkingStrategy: TypeAlias = Annotated[
+ Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py
new file mode 100644
index 00000000..25d94286
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam
+from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam
+
+__all__ = ["FileChunkingStrategyParam"]
+
+FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_content.py b/.venv/lib/python3.12/site-packages/openai/types/file_content.py
new file mode 100644
index 00000000..d89eee62
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_content.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import TypeAlias
+
+__all__ = ["FileContent"]
+
+FileContent: TypeAlias = str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/file_create_params.py
new file mode 100644
index 00000000..728dfd35
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_create_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .._types import FileTypes
+from .file_purpose import FilePurpose
+
+__all__ = ["FileCreateParams"]
+
+
+class FileCreateParams(TypedDict, total=False):
+ file: Required[FileTypes]
+ """The File object (not file name) to be uploaded."""
+
+ purpose: Required[FilePurpose]
+ """The intended purpose of the uploaded file.
+
+ One of:
+
+ - `assistants`: Used in the Assistants API
+ - `batch`: Used in the Batch API
+ - `fine-tune`: Used for fine-tuning
+ - `vision`: Images used for vision fine-tuning
+ - `user_data`: Flexible file type for any purpose
+ - `evals`: Used for eval data sets
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/file_deleted.py
new file mode 100644
index 00000000..f25fa87a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileDeleted"]
+
+
+class FileDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["file"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/file_list_params.py
new file mode 100644
index 00000000..058d874c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_list_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["FileListParams"]
+
+
+class FileListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 10,000, and the default is 10,000.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
+
+ purpose: str
+ """Only return files with the given purpose."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_object.py b/.venv/lib/python3.12/site-packages/openai/types/file_object.py
new file mode 100644
index 00000000..1d65e698
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_object.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileObject"]
+
+
+class FileObject(BaseModel):
+ id: str
+ """The file identifier, which can be referenced in the API endpoints."""
+
+ bytes: int
+ """The size of the file, in bytes."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the file was created."""
+
+ filename: str
+ """The name of the file."""
+
+ object: Literal["file"]
+ """The object type, which is always `file`."""
+
+ purpose: Literal[
+ "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision"
+ ]
+ """The intended purpose of the file.
+
+ Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`,
+ `fine-tune`, `fine-tune-results` and `vision`.
+ """
+
+ status: Literal["uploaded", "processed", "error"]
+ """Deprecated.
+
+ The current status of the file, which can be either `uploaded`, `processed`, or
+ `error`.
+ """
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the file will expire."""
+
+ status_details: Optional[str] = None
+ """Deprecated.
+
+ For details on why a fine-tuning training file failed validation, see the
+ `error` field on `fine_tuning.job`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_purpose.py b/.venv/lib/python3.12/site-packages/openai/types/file_purpose.py
new file mode 100644
index 00000000..b2c2d5f9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_purpose.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["FilePurpose"]
+
+FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py
new file mode 100644
index 00000000..92b81329
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .fine_tuning_job import FineTuningJob as FineTuningJob
+from .job_list_params import JobListParams as JobListParams
+from .job_create_params import JobCreateParams as JobCreateParams
+from .fine_tuning_job_event import FineTuningJobEvent as FineTuningJobEvent
+from .job_list_events_params import JobListEventsParams as JobListEventsParams
+from .fine_tuning_job_integration import FineTuningJobIntegration as FineTuningJobIntegration
+from .fine_tuning_job_wandb_integration import FineTuningJobWandbIntegration as FineTuningJobWandbIntegration
+from .fine_tuning_job_wandb_integration_object import (
+ FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py
new file mode 100644
index 00000000..c7fff2b7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py
@@ -0,0 +1,223 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.metadata import Metadata
+from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject
+
+__all__ = [
+ "FineTuningJob",
+ "Error",
+ "Hyperparameters",
+ "Method",
+ "MethodDpo",
+ "MethodDpoHyperparameters",
+ "MethodSupervised",
+ "MethodSupervisedHyperparameters",
+]
+
+
+class Error(BaseModel):
+ code: str
+ """A machine-readable error code."""
+
+ message: str
+ """A human-readable error message."""
+
+ param: Optional[str] = None
+ """The parameter that was invalid, usually `training_file` or `validation_file`.
+
+ This field will be null if the failure was not parameter-specific.
+ """
+
+
+class Hyperparameters(BaseModel):
+ batch_size: Union[Literal["auto"], int, None] = None
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int, None] = None
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+
+class MethodDpoHyperparameters(BaseModel):
+ batch_size: Union[Literal["auto"], int, None] = None
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ beta: Union[Literal["auto"], float, None] = None
+ """The beta value for the DPO method.
+
+ A higher beta value will increase the weight of the penalty between the policy
+ and reference model.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int, None] = None
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+
+class MethodDpo(BaseModel):
+ hyperparameters: Optional[MethodDpoHyperparameters] = None
+ """The hyperparameters used for the fine-tuning job."""
+
+
+class MethodSupervisedHyperparameters(BaseModel):
+ batch_size: Union[Literal["auto"], int, None] = None
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int, None] = None
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+
+class MethodSupervised(BaseModel):
+ hyperparameters: Optional[MethodSupervisedHyperparameters] = None
+ """The hyperparameters used for the fine-tuning job."""
+
+
+class Method(BaseModel):
+ dpo: Optional[MethodDpo] = None
+ """Configuration for the DPO fine-tuning method."""
+
+ supervised: Optional[MethodSupervised] = None
+ """Configuration for the supervised fine-tuning method."""
+
+ type: Optional[Literal["supervised", "dpo"]] = None
+ """The type of method. Is either `supervised` or `dpo`."""
+
+
+class FineTuningJob(BaseModel):
+ id: str
+ """The object identifier, which can be referenced in the API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
+
+ error: Optional[Error] = None
+ """
+ For fine-tuning jobs that have `failed`, this will contain more information on
+ the cause of the failure.
+ """
+
+ fine_tuned_model: Optional[str] = None
+ """The name of the fine-tuned model that is being created.
+
+ The value will be null if the fine-tuning job is still running.
+ """
+
+ finished_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the fine-tuning job was finished.
+
+ The value will be null if the fine-tuning job is still running.
+ """
+
+ hyperparameters: Hyperparameters
+ """The hyperparameters used for the fine-tuning job.
+
+ This value will only be returned when running `supervised` jobs.
+ """
+
+ model: str
+ """The base model that is being fine-tuned."""
+
+ object: Literal["fine_tuning.job"]
+ """The object type, which is always "fine_tuning.job"."""
+
+ organization_id: str
+ """The organization that owns the fine-tuning job."""
+
+ result_files: List[str]
+ """The compiled results file ID(s) for the fine-tuning job.
+
+ You can retrieve the results with the
+ [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+ """
+
+ seed: int
+ """The seed used for the fine-tuning job."""
+
+ status: Literal["validating_files", "queued", "running", "succeeded", "failed", "cancelled"]
+ """
+ The current status of the fine-tuning job, which can be either
+ `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
+ """
+
+ trained_tokens: Optional[int] = None
+ """The total number of billable tokens processed by this fine-tuning job.
+
+ The value will be null if the fine-tuning job is still running.
+ """
+
+ training_file: str
+ """The file ID used for training.
+
+ You can retrieve the training data with the
+ [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+ """
+
+ validation_file: Optional[str] = None
+ """The file ID used for validation.
+
+ You can retrieve the validation results with the
+ [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+ """
+
+ estimated_finish: Optional[int] = None
+ """
+ The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
+ finish. The value will be null if the fine-tuning job is not running.
+ """
+
+ integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None
+ """A list of integrations to enable for this fine-tuning job."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ method: Optional[Method] = None
+ """The method used for fine-tuning."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py
new file mode 100644
index 00000000..1d728bd7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import builtins
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FineTuningJobEvent"]
+
+
+class FineTuningJobEvent(BaseModel):
+ id: str
+ """The object identifier."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
+
+ level: Literal["info", "warn", "error"]
+ """The log level of the event."""
+
+ message: str
+ """The message of the event."""
+
+ object: Literal["fine_tuning.job.event"]
+ """The object type, which is always "fine_tuning.job.event"."""
+
+ data: Optional[builtins.object] = None
+ """The data associated with the event."""
+
+ type: Optional[Literal["message", "metrics"]] = None
+ """The type of event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py
new file mode 100644
index 00000000..9a66aa4f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject
+
+FineTuningJobIntegration = FineTuningJobWandbIntegrationObject
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py
new file mode 100644
index 00000000..4ac282eb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FineTuningJobWandbIntegration"]
+
+
+class FineTuningJobWandbIntegration(BaseModel):
+ project: str
+ """The name of the project that the new run will be created under."""
+
+ entity: Optional[str] = None
+ """The entity to use for the run.
+
+ This allows you to set the team or username of the WandB user that you would
+ like associated with the run. If not set, the default entity for the registered
+ WandB API key is used.
+ """
+
+ name: Optional[str] = None
+ """A display name to set for the run.
+
+ If not set, we will use the Job ID as the name.
+ """
+
+ tags: Optional[List[str]] = None
+ """A list of tags to be attached to the newly created run.
+
+ These tags are passed through directly to WandB. Some default tags are generated
+ by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py
new file mode 100644
index 00000000..5b94354d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .fine_tuning_job_wandb_integration import FineTuningJobWandbIntegration
+
+__all__ = ["FineTuningJobWandbIntegrationObject"]
+
+
+class FineTuningJobWandbIntegrationObject(BaseModel):
+ type: Literal["wandb"]
+ """The type of the integration being enabled for the fine-tuning job"""
+
+ wandb: FineTuningJobWandbIntegration
+ """The settings for your integration with Weights and Biases.
+
+ This payload specifies the project that metrics will be sent to. Optionally, you
+ can set an explicit display name for your run, add tags to your run, and set a
+ default entity (team, username, etc) to be associated with your run.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py
new file mode 100644
index 00000000..f4cf980b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py
@@ -0,0 +1,236 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = [
+ "JobCreateParams",
+ "Hyperparameters",
+ "Integration",
+ "IntegrationWandb",
+ "Method",
+ "MethodDpo",
+ "MethodDpoHyperparameters",
+ "MethodSupervised",
+ "MethodSupervisedHyperparameters",
+]
+
+
+class JobCreateParams(TypedDict, total=False):
+ model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]]
+ """The name of the model to fine-tune.
+
+ You can select one of the
+ [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
+ """
+
+ training_file: Required[str]
+ """The ID of an uploaded file that contains training data.
+
+ See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+ for how to upload a file.
+
+ Your dataset must be formatted as a JSONL file. Additionally, you must upload
+ your file with the purpose `fine-tune`.
+
+    The contents of the file should differ depending on whether the model uses the
+    [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
+    [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+    format, or whether the fine-tuning method uses the
+    [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
+    format.
+
+ See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ for more details.
+ """
+
+ hyperparameters: Hyperparameters
+ """
+ The hyperparameters used for the fine-tuning job. This value is now deprecated
+ in favor of `method`, and should be passed in under the `method` parameter.
+ """
+
+ integrations: Optional[Iterable[Integration]]
+ """A list of integrations to enable for your fine-tuning job."""
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ method: Method
+ """The method used for fine-tuning."""
+
+ seed: Optional[int]
+ """The seed controls the reproducibility of the job.
+
+ Passing in the same seed and job parameters should produce the same results, but
+ may differ in rare cases. If a seed is not specified, one will be generated for
+ you.
+ """
+
+ suffix: Optional[str]
+ """
+ A string of up to 64 characters that will be added to your fine-tuned model
+ name.
+
+ For example, a `suffix` of "custom-model-name" would produce a model name like
+ `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
+ """
+
+ validation_file: Optional[str]
+ """The ID of an uploaded file that contains validation data.
+
+ If you provide this file, the data is used to generate validation metrics
+ periodically during fine-tuning. These metrics can be viewed in the fine-tuning
+ results file. The same data should not be present in both train and validation
+ files.
+
+ Your dataset must be formatted as a JSONL file. You must upload your file with
+ the purpose `fine-tune`.
+
+ See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ for more details.
+ """
+
+
+class Hyperparameters(TypedDict, total=False):
+ batch_size: Union[Literal["auto"], int]
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float]
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int]
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+
+class IntegrationWandb(TypedDict, total=False):
+ project: Required[str]
+ """The name of the project that the new run will be created under."""
+
+ entity: Optional[str]
+ """The entity to use for the run.
+
+ This allows you to set the team or username of the WandB user that you would
+ like associated with the run. If not set, the default entity for the registered
+ WandB API key is used.
+ """
+
+ name: Optional[str]
+ """A display name to set for the run.
+
+ If not set, we will use the Job ID as the name.
+ """
+
+ tags: List[str]
+ """A list of tags to be attached to the newly created run.
+
+ These tags are passed through directly to WandB. Some default tags are generated
+ by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
+ """
+
+
+class Integration(TypedDict, total=False):
+ type: Required[Literal["wandb"]]
+ """The type of integration to enable.
+
+ Currently, only "wandb" (Weights and Biases) is supported.
+ """
+
+ wandb: Required[IntegrationWandb]
+ """The settings for your integration with Weights and Biases.
+
+ This payload specifies the project that metrics will be sent to. Optionally, you
+ can set an explicit display name for your run, add tags to your run, and set a
+ default entity (team, username, etc) to be associated with your run.
+ """
+
+
+class MethodDpoHyperparameters(TypedDict, total=False):
+ batch_size: Union[Literal["auto"], int]
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ beta: Union[Literal["auto"], float]
+ """The beta value for the DPO method.
+
+ A higher beta value will increase the weight of the penalty between the policy
+ and reference model.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float]
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int]
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+
+class MethodDpo(TypedDict, total=False):
+ hyperparameters: MethodDpoHyperparameters
+ """The hyperparameters used for the fine-tuning job."""
+
+
+class MethodSupervisedHyperparameters(TypedDict, total=False):
+ batch_size: Union[Literal["auto"], int]
+ """Number of examples in each batch.
+
+ A larger batch size means that model parameters are updated less frequently, but
+ with lower variance.
+ """
+
+ learning_rate_multiplier: Union[Literal["auto"], float]
+ """Scaling factor for the learning rate.
+
+ A smaller learning rate may be useful to avoid overfitting.
+ """
+
+ n_epochs: Union[Literal["auto"], int]
+ """The number of epochs to train the model for.
+
+ An epoch refers to one full cycle through the training dataset.
+ """
+
+
+class MethodSupervised(TypedDict, total=False):
+ hyperparameters: MethodSupervisedHyperparameters
+ """The hyperparameters used for the fine-tuning job."""
+
+
+class Method(TypedDict, total=False):
+ dpo: MethodDpo
+ """Configuration for the DPO fine-tuning method."""
+
+ supervised: MethodSupervised
+ """Configuration for the supervised fine-tuning method."""
+
+ type: Literal["supervised", "dpo"]
+ """The type of method. Is either `supervised` or `dpo`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py
new file mode 100644
index 00000000..e1c9a64d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["JobListEventsParams"]
+
+
+class JobListEventsParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last event from the previous pagination request."""
+
+ limit: int
+ """Number of events to retrieve."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py
new file mode 100644
index 00000000..b79f3ce8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import TypedDict
+
+__all__ = ["JobListParams"]
+
+
+class JobListParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last job from the previous pagination request."""
+
+ limit: int
+ """Number of fine-tuning jobs to retrieve."""
+
+ metadata: Optional[Dict[str, str]]
+ """Optional metadata filter.
+
+ To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to
+ indicate no metadata.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py
new file mode 100644
index 00000000..6c93da1b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .checkpoint_list_params import CheckpointListParams as CheckpointListParams
+from .fine_tuning_job_checkpoint import FineTuningJobCheckpoint as FineTuningJobCheckpoint
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py
new file mode 100644
index 00000000..adceb3b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["CheckpointListParams"]
+
+
+class CheckpointListParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last checkpoint ID from the previous pagination request."""
+
+ limit: int
+ """Number of checkpoints to retrieve."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py
new file mode 100644
index 00000000..bd07317a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FineTuningJobCheckpoint", "Metrics"]
+
+
+class Metrics(BaseModel):
+ full_valid_loss: Optional[float] = None
+
+ full_valid_mean_token_accuracy: Optional[float] = None
+
+ step: Optional[float] = None
+
+ train_loss: Optional[float] = None
+
+ train_mean_token_accuracy: Optional[float] = None
+
+ valid_loss: Optional[float] = None
+
+ valid_mean_token_accuracy: Optional[float] = None
+
+
+class FineTuningJobCheckpoint(BaseModel):
+ id: str
+ """The checkpoint identifier, which can be referenced in the API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the checkpoint was created."""
+
+ fine_tuned_model_checkpoint: str
+ """The name of the fine-tuned checkpoint model that is created."""
+
+ fine_tuning_job_id: str
+ """The name of the fine-tuning job that this checkpoint was created from."""
+
+ metrics: Metrics
+ """Metrics at the step number during the fine-tuning job."""
+
+ object: Literal["fine_tuning.job.checkpoint"]
+ """The object type, which is always "fine_tuning.job.checkpoint"."""
+
+ step_number: int
+ """The step number that the checkpoint was created at."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image.py b/.venv/lib/python3.12/site-packages/openai/types/image.py
new file mode 100644
index 00000000..f48aa2c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["Image"]
+
+
+class Image(BaseModel):
+ b64_json: Optional[str] = None
+ """
+ The base64-encoded JSON of the generated image, if `response_format` is
+ `b64_json`.
+ """
+
+ revised_prompt: Optional[str] = None
+ """
+ The prompt that was used to generate the image, if there was any revision to the
+ prompt.
+ """
+
+ url: Optional[str] = None
+ """The URL of the generated image, if `response_format` is `url` (default)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py b/.venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py
new file mode 100644
index 00000000..d20f6729
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import FileTypes
+from .image_model import ImageModel
+
+__all__ = ["ImageCreateVariationParams"]
+
+
+class ImageCreateVariationParams(TypedDict, total=False):
+ image: Required[FileTypes]
+ """The image to use as the basis for the variation(s).
+
+ Must be a valid PNG file, less than 4MB, and square.
+ """
+
+ model: Union[str, ImageModel, None]
+ """The model to use for image generation.
+
+ Only `dall-e-2` is supported at this time.
+ """
+
+ n: Optional[int]
+ """The number of images to generate.
+
+ Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
+ """
+
+ response_format: Optional[Literal["url", "b64_json"]]
+ """The format in which the generated images are returned.
+
+ Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+ image has been generated.
+ """
+
+ size: Optional[Literal["256x256", "512x512", "1024x1024"]]
+ """The size of the generated images.
+
+ Must be one of `256x256`, `512x512`, or `1024x1024`.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_edit_params.py b/.venv/lib/python3.12/site-packages/openai/types/image_edit_params.py
new file mode 100644
index 00000000..1cb10611
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_edit_params.py
@@ -0,0 +1,62 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import FileTypes
+from .image_model import ImageModel
+
+__all__ = ["ImageEditParams"]
+
+
+class ImageEditParams(TypedDict, total=False):
+ image: Required[FileTypes]
+ """The image to edit.
+
+ Must be a valid PNG file, less than 4MB, and square. If mask is not provided,
+ image must have transparency, which will be used as the mask.
+ """
+
+ prompt: Required[str]
+ """A text description of the desired image(s).
+
+ The maximum length is 1000 characters.
+ """
+
+ mask: FileTypes
+ """An additional image whose fully transparent areas (e.g.
+
+ where alpha is zero) indicate where `image` should be edited. Must be a valid
+ PNG file, less than 4MB, and have the same dimensions as `image`.
+ """
+
+ model: Union[str, ImageModel, None]
+ """The model to use for image generation.
+
+ Only `dall-e-2` is supported at this time.
+ """
+
+ n: Optional[int]
+ """The number of images to generate. Must be between 1 and 10."""
+
+ response_format: Optional[Literal["url", "b64_json"]]
+ """The format in which the generated images are returned.
+
+ Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+ image has been generated.
+ """
+
+ size: Optional[Literal["256x256", "512x512", "1024x1024"]]
+ """The size of the generated images.
+
+ Must be one of `256x256`, `512x512`, or `1024x1024`.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_generate_params.py b/.venv/lib/python3.12/site-packages/openai/types/image_generate_params.py
new file mode 100644
index 00000000..c88c45f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_generate_params.py
@@ -0,0 +1,65 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .image_model import ImageModel
+
+__all__ = ["ImageGenerateParams"]
+
+
+class ImageGenerateParams(TypedDict, total=False):
+ prompt: Required[str]
+ """A text description of the desired image(s).
+
+ The maximum length is 1000 characters for `dall-e-2` and 4000 characters for
+ `dall-e-3`.
+ """
+
+ model: Union[str, ImageModel, None]
+ """The model to use for image generation."""
+
+ n: Optional[int]
+ """The number of images to generate.
+
+ Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
+ """
+
+ quality: Literal["standard", "hd"]
+ """The quality of the image that will be generated.
+
+ `hd` creates images with finer details and greater consistency across the image.
+ This param is only supported for `dall-e-3`.
+ """
+
+ response_format: Optional[Literal["url", "b64_json"]]
+ """The format in which the generated images are returned.
+
+ Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+ image has been generated.
+ """
+
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
+ """The size of the generated images.
+
+ Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one
+ of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
+ """
+
+ style: Optional[Literal["vivid", "natural"]]
+ """The style of the generated images.
+
+ Must be one of `vivid` or `natural`. Vivid causes the model to lean towards
+ generating hyper-real and dramatic images. Natural causes the model to produce
+ more natural, less hyper-real looking images. This param is only supported for
+ `dall-e-3`.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_model.py b/.venv/lib/python3.12/site-packages/openai/types/image_model.py
new file mode 100644
index 00000000..1672369b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ImageModel"]
+
+ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/images_response.py b/.venv/lib/python3.12/site-packages/openai/types/images_response.py
new file mode 100644
index 00000000..7cee8131
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/images_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .image import Image
+from .._models import BaseModel
+
+__all__ = ["ImagesResponse"]
+
+
+class ImagesResponse(BaseModel):
+ created: int
+
+ data: List[Image]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/model.py b/.venv/lib/python3.12/site-packages/openai/types/model.py
new file mode 100644
index 00000000..2631ee8d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/model.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Model"]
+
+
+class Model(BaseModel):
+ id: str
+ """The model identifier, which can be referenced in the API endpoints."""
+
+ created: int
+ """The Unix timestamp (in seconds) when the model was created."""
+
+ object: Literal["model"]
+ """The object type, which is always "model"."""
+
+ owned_by: str
+ """The organization that owns the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/model_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/model_deleted.py
new file mode 100644
index 00000000..7f81e1b3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/model_deleted.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["ModelDeleted"]
+
+
+class ModelDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation.py b/.venv/lib/python3.12/site-packages/openai/types/moderation.py
new file mode 100644
index 00000000..608f5622
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation.py
@@ -0,0 +1,186 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["Moderation", "Categories", "CategoryAppliedInputTypes", "CategoryScores"]
+
+
+class Categories(BaseModel):
+ harassment: bool
+ """
+ Content that expresses, incites, or promotes harassing language towards any
+ target.
+ """
+
+ harassment_threatening: bool = FieldInfo(alias="harassment/threatening")
+ """
+ Harassment content that also includes violence or serious harm towards any
+ target.
+ """
+
+ hate: bool
+ """
+ Content that expresses, incites, or promotes hate based on race, gender,
+ ethnicity, religion, nationality, sexual orientation, disability status, or
+ caste. Hateful content aimed at non-protected groups (e.g., chess players) is
+ harassment.
+ """
+
+ hate_threatening: bool = FieldInfo(alias="hate/threatening")
+ """
+ Hateful content that also includes violence or serious harm towards the targeted
+ group based on race, gender, ethnicity, religion, nationality, sexual
+ orientation, disability status, or caste.
+ """
+
+ illicit: Optional[bool] = None
+ """
+ Content that includes instructions or advice that facilitate the planning or
+ execution of wrongdoing, or that gives advice or instruction on how to commit
+ illicit acts. For example, "how to shoplift" would fit this category.
+ """
+
+ illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None)
+ """
+ Content that includes instructions or advice that facilitate the planning or
+ execution of wrongdoing that also includes violence, or that gives advice or
+ instruction on the procurement of any weapon.
+ """
+
+ self_harm: bool = FieldInfo(alias="self-harm")
+ """
+ Content that promotes, encourages, or depicts acts of self-harm, such as
+ suicide, cutting, and eating disorders.
+ """
+
+ self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions")
+ """
+ Content that encourages performing acts of self-harm, such as suicide, cutting,
+ and eating disorders, or that gives instructions or advice on how to commit such
+ acts.
+ """
+
+ self_harm_intent: bool = FieldInfo(alias="self-harm/intent")
+ """
+ Content where the speaker expresses that they are engaging or intend to engage
+ in acts of self-harm, such as suicide, cutting, and eating disorders.
+ """
+
+ sexual: bool
+ """
+ Content meant to arouse sexual excitement, such as the description of sexual
+ activity, or that promotes sexual services (excluding sex education and
+ wellness).
+ """
+
+ sexual_minors: bool = FieldInfo(alias="sexual/minors")
+ """Sexual content that includes an individual who is under 18 years old."""
+
+ violence: bool
+ """Content that depicts death, violence, or physical injury."""
+
+ violence_graphic: bool = FieldInfo(alias="violence/graphic")
+ """Content that depicts death, violence, or physical injury in graphic detail."""
+
+
+class CategoryAppliedInputTypes(BaseModel):
+ harassment: List[Literal["text"]]
+ """The applied input type(s) for the category 'harassment'."""
+
+ harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening")
+ """The applied input type(s) for the category 'harassment/threatening'."""
+
+ hate: List[Literal["text"]]
+ """The applied input type(s) for the category 'hate'."""
+
+ hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening")
+ """The applied input type(s) for the category 'hate/threatening'."""
+
+ illicit: List[Literal["text"]]
+ """The applied input type(s) for the category 'illicit'."""
+
+ illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent")
+ """The applied input type(s) for the category 'illicit/violent'."""
+
+ self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm")
+ """The applied input type(s) for the category 'self-harm'."""
+
+ self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions")
+ """The applied input type(s) for the category 'self-harm/instructions'."""
+
+ self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent")
+ """The applied input type(s) for the category 'self-harm/intent'."""
+
+ sexual: List[Literal["text", "image"]]
+ """The applied input type(s) for the category 'sexual'."""
+
+ sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors")
+ """The applied input type(s) for the category 'sexual/minors'."""
+
+ violence: List[Literal["text", "image"]]
+ """The applied input type(s) for the category 'violence'."""
+
+ violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic")
+ """The applied input type(s) for the category 'violence/graphic'."""
+
+
+class CategoryScores(BaseModel):
+ harassment: float
+ """The score for the category 'harassment'."""
+
+ harassment_threatening: float = FieldInfo(alias="harassment/threatening")
+ """The score for the category 'harassment/threatening'."""
+
+ hate: float
+ """The score for the category 'hate'."""
+
+ hate_threatening: float = FieldInfo(alias="hate/threatening")
+ """The score for the category 'hate/threatening'."""
+
+ illicit: float
+ """The score for the category 'illicit'."""
+
+ illicit_violent: float = FieldInfo(alias="illicit/violent")
+ """The score for the category 'illicit/violent'."""
+
+ self_harm: float = FieldInfo(alias="self-harm")
+ """The score for the category 'self-harm'."""
+
+ self_harm_instructions: float = FieldInfo(alias="self-harm/instructions")
+ """The score for the category 'self-harm/instructions'."""
+
+ self_harm_intent: float = FieldInfo(alias="self-harm/intent")
+ """The score for the category 'self-harm/intent'."""
+
+ sexual: float
+ """The score for the category 'sexual'."""
+
+ sexual_minors: float = FieldInfo(alias="sexual/minors")
+ """The score for the category 'sexual/minors'."""
+
+ violence: float
+ """The score for the category 'violence'."""
+
+ violence_graphic: float = FieldInfo(alias="violence/graphic")
+ """The score for the category 'violence/graphic'."""
+
+
+class Moderation(BaseModel):
+ categories: Categories
+ """A list of the categories, and whether they are flagged or not."""
+
+ category_applied_input_types: CategoryAppliedInputTypes
+ """
+ A list of the categories along with the input type(s) that the score applies to.
+ """
+
+ category_scores: CategoryScores
+ """A list of the categories along with their scores as predicted by model."""
+
+ flagged: bool
+ """Whether any of the below categories are flagged."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py
new file mode 100644
index 00000000..3ea2f3cd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Required, TypedDict
+
+from .moderation_model import ModerationModel
+from .moderation_multi_modal_input_param import ModerationMultiModalInputParam
+
+__all__ = ["ModerationCreateParams"]
+
+
+class ModerationCreateParams(TypedDict, total=False):
+ input: Required[Union[str, List[str], Iterable[ModerationMultiModalInputParam]]]
+ """Input (or inputs) to classify.
+
+ Can be a single string, an array of strings, or an array of multi-modal input
+ objects similar to other models.
+ """
+
+ model: Union[str, ModerationModel]
+ """The content moderation model you would like to use.
+
+ Learn more in
+ [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
+ learn about available models
+ [here](https://platform.openai.com/docs/models#moderation).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py
new file mode 100644
index 00000000..79684f8a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .moderation import Moderation
+
+__all__ = ["ModerationCreateResponse"]
+
+
+class ModerationCreateResponse(BaseModel):
+ id: str
+ """The unique identifier for the moderation request."""
+
+ model: str
+ """The model used to generate the moderation results."""
+
+ results: List[Moderation]
+ """A list of moderation objects."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py
new file mode 100644
index 00000000..9a69a6a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ModerationImageURLInputParam", "ImageURL"]
+
+
+class ImageURL(TypedDict, total=False):
+ url: Required[str]
+ """Either a URL of the image or the base64 encoded image data."""
+
+
+class ModerationImageURLInputParam(TypedDict, total=False):
+ image_url: Required[ImageURL]
+ """Contains either an image URL or a data URL for a base64 encoded image."""
+
+ type: Required[Literal["image_url"]]
+ """Always `image_url`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_model.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_model.py
new file mode 100644
index 00000000..64954c45
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_model.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ModerationModel"]
+
+ModerationModel: TypeAlias = Literal[
+ "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable"
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py
new file mode 100644
index 00000000..4314e7b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .moderation_text_input_param import ModerationTextInputParam
+from .moderation_image_url_input_param import ModerationImageURLInputParam
+
+__all__ = ["ModerationMultiModalInputParam"]
+
+ModerationMultiModalInputParam: TypeAlias = Union[ModerationImageURLInputParam, ModerationTextInputParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py
new file mode 100644
index 00000000..e5da5333
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ModerationTextInputParam"]
+
+
+class ModerationTextInputParam(TypedDict, total=False):
+ text: Required[str]
+ """A string of text to classify."""
+
+ type: Required[Literal["text"]]
+ """Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py b/.venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py
new file mode 100644
index 00000000..e4cd61a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["OtherFileChunkingStrategyObject"]
+
+
+class OtherFileChunkingStrategyObject(BaseModel):
+ type: Literal["other"]
+ """Always `other`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/responses/__init__.py
new file mode 100644
index 00000000..4f07a3d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/__init__.py
@@ -0,0 +1,155 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .tool import Tool as Tool
+from .response import Response as Response
+from .tool_param import ToolParam as ToolParam
+from .computer_tool import ComputerTool as ComputerTool
+from .function_tool import FunctionTool as FunctionTool
+from .response_item import ResponseItem as ResponseItem
+from .response_error import ResponseError as ResponseError
+from .response_usage import ResponseUsage as ResponseUsage
+from .parsed_response import (
+ ParsedContent as ParsedContent,
+ ParsedResponse as ParsedResponse,
+ ParsedResponseOutputItem as ParsedResponseOutputItem,
+ ParsedResponseOutputText as ParsedResponseOutputText,
+ ParsedResponseOutputMessage as ParsedResponseOutputMessage,
+ ParsedResponseFunctionToolCall as ParsedResponseFunctionToolCall,
+)
+from .response_status import ResponseStatus as ResponseStatus
+from .web_search_tool import WebSearchTool as WebSearchTool
+from .file_search_tool import FileSearchTool as FileSearchTool
+from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes
+from .response_item_list import ResponseItemList as ResponseItemList
+from .computer_tool_param import ComputerToolParam as ComputerToolParam
+from .function_tool_param import FunctionToolParam as FunctionToolParam
+from .response_includable import ResponseIncludable as ResponseIncludable
+from .response_input_file import ResponseInputFile as ResponseInputFile
+from .response_input_text import ResponseInputText as ResponseInputText
+from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions
+from .response_error_event import ResponseErrorEvent as ResponseErrorEvent
+from .response_input_image import ResponseInputImage as ResponseInputImage
+from .response_input_param import ResponseInputParam as ResponseInputParam
+from .response_output_item import ResponseOutputItem as ResponseOutputItem
+from .response_output_text import ResponseOutputText as ResponseOutputText
+from .response_text_config import ResponseTextConfig as ResponseTextConfig
+from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction
+from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent
+from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent
+from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam
+from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
+from .input_item_list_params import InputItemListParams as InputItemListParams
+from .response_create_params import ResponseCreateParams as ResponseCreateParams
+from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent
+from .response_input_content import ResponseInputContent as ResponseInputContent
+from .response_output_message import ResponseOutputMessage as ResponseOutputMessage
+from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal
+from .response_reasoning_item import ResponseReasoningItem as ResponseReasoningItem
+from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam
+from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam
+from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent
+from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams
+from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent
+from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent
+from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent
+from .response_input_file_param import ResponseInputFileParam as ResponseInputFileParam
+from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam
+from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam
+from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
+from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
+from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
+from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam
+from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
+from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
+from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
+from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall
+from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem
+from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent
+from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch
+from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam
+from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent
+from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam
+from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam
+from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam
+from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
+from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
+from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem
+from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
+from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam
+from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
+from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall
+from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam
+from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent
+from .response_audio_transcript_delta_event import (
+ ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
+)
+from .response_computer_tool_call_output_item import (
+ ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem,
+)
+from .response_format_text_json_schema_config import (
+ ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig,
+)
+from .response_function_tool_call_output_item import (
+ ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem,
+)
+from .response_web_search_call_completed_event import (
+ ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent,
+)
+from .response_web_search_call_searching_event import (
+ ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent,
+)
+from .response_file_search_call_completed_event import (
+ ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent,
+)
+from .response_file_search_call_searching_event import (
+ ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent,
+)
+from .response_input_message_content_list_param import (
+ ResponseInputMessageContentListParam as ResponseInputMessageContentListParam,
+)
+from .response_web_search_call_in_progress_event import (
+ ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,
+)
+from .response_file_search_call_in_progress_event import (
+ ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent,
+)
+from .response_function_call_arguments_done_event import (
+ ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
+)
+from .response_function_call_arguments_delta_event import (
+ ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
+)
+from .response_computer_tool_call_output_screenshot import (
+ ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot,
+)
+from .response_format_text_json_schema_config_param import (
+ ResponseFormatTextJSONSchemaConfigParam as ResponseFormatTextJSONSchemaConfigParam,
+)
+from .response_code_interpreter_call_code_done_event import (
+ ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent,
+)
+from .response_code_interpreter_call_completed_event import (
+ ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent,
+)
+from .response_code_interpreter_call_code_delta_event import (
+ ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent,
+)
+from .response_code_interpreter_call_in_progress_event import (
+ ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent,
+)
+from .response_code_interpreter_call_interpreting_event import (
+ ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent,
+)
+from .response_computer_tool_call_output_screenshot_param import (
+ ResponseComputerToolCallOutputScreenshotParam as ResponseComputerToolCallOutputScreenshotParam,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py
new file mode 100644
index 00000000..dffb7af7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComputerTool"]
+
+
+class ComputerTool(BaseModel):
+ display_height: float
+ """The height of the computer display."""
+
+ display_width: float
+ """The width of the computer display."""
+
+ environment: Literal["mac", "windows", "ubuntu", "browser"]
+ """The type of computer environment to control."""
+
+ type: Literal["computer_use_preview"]
+ """The type of the computer use tool. Always `computer_use_preview`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py
new file mode 100644
index 00000000..6b1072ff
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ComputerToolParam"]
+
+
+class ComputerToolParam(TypedDict, total=False):
+ display_height: Required[float]
+ """The height of the computer display."""
+
+ display_width: Required[float]
+ """The width of the computer display."""
+
+ environment: Required[Literal["mac", "windows", "ubuntu", "browser"]]
+ """The type of computer environment to control."""
+
+ type: Required[Literal["computer_use_preview"]]
+ """The type of the computer use tool. Always `computer_use_preview`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py
new file mode 100644
index 00000000..ef2f1c5f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+
+__all__ = ["EasyInputMessageParam"]
+
+
+class EasyInputMessageParam(TypedDict, total=False):
+ content: Required[Union[str, ResponseInputMessageContentListParam]]
+ """
+ Text, image, or audio input to the model, used to generate a response. Can also
+ contain previous assistant responses.
+ """
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py
new file mode 100644
index 00000000..683fc533
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from ..shared.compound_filter import CompoundFilter
+from ..shared.comparison_filter import ComparisonFilter
+
+__all__ = ["FileSearchTool", "Filters", "RankingOptions"]
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(BaseModel):
+ ranker: Optional[Literal["auto", "default-2024-11-15"]] = None
+ """The ranker to use for the file search."""
+
+ score_threshold: Optional[float] = None
+ """
+ The score threshold for the file search, a number between 0 and 1. Numbers
+ closer to 1 will attempt to return only the most relevant results, but may
+ return fewer results.
+ """
+
+
+class FileSearchTool(BaseModel):
+ type: Literal["file_search"]
+ """The type of the file search tool. Always `file_search`."""
+
+ vector_store_ids: List[str]
+ """The IDs of the vector stores to search."""
+
+ filters: Optional[Filters] = None
+ """A filter to apply based on file attributes."""
+
+ max_num_results: Optional[int] = None
+ """The maximum number of results to return.
+
+ This number should be between 1 and 50 inclusive.
+ """
+
+ ranking_options: Optional[RankingOptions] = None
+ """Ranking options for search."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py
new file mode 100644
index 00000000..2d6af853
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared_params.compound_filter import CompoundFilter
+from ..shared_params.comparison_filter import ComparisonFilter
+
+__all__ = ["FileSearchToolParam", "Filters", "RankingOptions"]
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(TypedDict, total=False):
+ ranker: Literal["auto", "default-2024-11-15"]
+ """The ranker to use for the file search."""
+
+ score_threshold: float
+ """
+ The score threshold for the file search, a number between 0 and 1. Numbers
+ closer to 1 will attempt to return only the most relevant results, but may
+ return fewer results.
+ """
+
+
+class FileSearchToolParam(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """The type of the file search tool. Always `file_search`."""
+
+ vector_store_ids: Required[List[str]]
+ """The IDs of the vector stores to search."""
+
+ filters: Filters
+ """A filter to apply based on file attributes."""
+
+ max_num_results: int
+ """The maximum number of results to return.
+
+ This number should be between 1 and 50 inclusive.
+ """
+
+ ranking_options: RankingOptions
+ """Ranking options for search."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py
new file mode 100644
index 00000000..236a2c7c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FunctionTool"]
+
+
+class FunctionTool(BaseModel):
+ name: str
+ """The name of the function to call."""
+
+ parameters: Dict[str, object]
+ """A JSON schema object describing the parameters of the function."""
+
+ strict: bool
+ """Whether to enforce strict parameter validation. Default `true`."""
+
+ type: Literal["function"]
+ """The type of the function tool. Always `function`."""
+
+ description: Optional[str] = None
+ """A description of the function.
+
+ Used by the model to determine whether or not to call the function.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py
new file mode 100644
index 00000000..774a22e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FunctionToolParam"]
+
+
+class FunctionToolParam(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
+
+ parameters: Required[Dict[str, object]]
+ """A JSON schema object describing the parameters of the function."""
+
+ strict: Required[bool]
+ """Whether to enforce strict parameter validation. Default `true`."""
+
+ type: Required[Literal["function"]]
+ """The type of the function tool. Always `function`."""
+
+ description: Optional[str]
+ """A description of the function.
+
+ Used by the model to determine whether or not to call the function.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py
new file mode 100644
index 00000000..e0b71f1a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["InputItemListParams"]
+
+
+class InputItemListParams(TypedDict, total=False):
+ after: str
+ """An item ID to list items after, used in pagination."""
+
+ before: str
+ """An item ID to list items before, used in pagination."""
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """The order to return the input items in. Default is `asc`.
+
+ - `asc`: Return the input items in ascending order.
+ - `desc`: Return the input items in descending order.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py b/.venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py
new file mode 100644
index 00000000..1263dfd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py
@@ -0,0 +1,76 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import TYPE_CHECKING, List, Union, Generic, TypeVar, Optional
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response import Response
+from ..._models import GenericModel
+from .response_output_text import ResponseOutputText
+from .response_output_message import ResponseOutputMessage
+from .response_output_refusal import ResponseOutputRefusal
+from .response_reasoning_item import ResponseReasoningItem
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_function_tool_call import ResponseFunctionToolCall
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+
+__all__ = ["ParsedResponse", "ParsedResponseOutputMessage", "ParsedResponseOutputText"]
+
+ContentType = TypeVar("ContentType")
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedResponseOutputText(ResponseOutputText, GenericModel, Generic[ContentType]):
+ parsed: Optional[ContentType] = None
+
+
+ParsedContent: TypeAlias = Annotated[
+ Union[ParsedResponseOutputText[ContentType], ResponseOutputRefusal],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class ParsedResponseOutputMessage(ResponseOutputMessage, GenericModel, Generic[ContentType]):
+ if TYPE_CHECKING:
+ content: List[ParsedContent[ContentType]] # type: ignore[assignment]
+ else:
+ content: List[ParsedContent]
+
+
+class ParsedResponseFunctionToolCall(ResponseFunctionToolCall):
+ parsed_arguments: object = None
+
+
+ParsedResponseOutputItem: TypeAlias = Annotated[
+ Union[
+ ParsedResponseOutputMessage[ContentType],
+ ParsedResponseFunctionToolCall,
+ ResponseFileSearchToolCall,
+ ResponseFunctionWebSearch,
+ ResponseComputerToolCall,
+ ResponseReasoningItem,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class ParsedResponse(Response, GenericModel, Generic[ContentType]):
+ if TYPE_CHECKING:
+ output: List[ParsedResponseOutputItem[ContentType]] # type: ignore[assignment]
+ else:
+ output: List[ParsedResponseOutputItem]
+
+ @property
+ def output_parsed(self) -> Optional[ContentType]:
+ for output in self.output:
+ if output.type == "message":
+ for content in output.content:
+ if content.type == "output_text" and content.parsed:
+ return content.parsed
+
+ return None
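`ParsedResponse[ContentType]` narrows `output` so each `output_text` part carries a typed `parsed` payload, and `output_parsed` returns the first one. A hedged sketch using the `responses.parse` helper that ships alongside these types (model name and prompt are illustrative):

```
from pydantic import BaseModel

from openai import OpenAI


class WeatherReport(BaseModel):
    city: str
    temperature_c: float


client = OpenAI()
# Returns a ParsedResponse[WeatherReport]; output_parsed is the first
# parsed output_text payload, or None if the model refused.
resp = client.responses.parse(
    model="gpt-4o",
    input="What is the weather in Paris?",
    text_format=WeatherReport,
)
print(resp.output_parsed)
```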
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response.py
new file mode 100644
index 00000000..1bedf808
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response.py
@@ -0,0 +1,204 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from .tool import Tool
+from ..._models import BaseModel
+from .response_error import ResponseError
+from .response_usage import ResponseUsage
+from .response_status import ResponseStatus
+from ..shared.metadata import Metadata
+from ..shared.reasoning import Reasoning
+from .tool_choice_types import ToolChoiceTypes
+from .tool_choice_options import ToolChoiceOptions
+from .response_output_item import ResponseOutputItem
+from .response_text_config import ResponseTextConfig
+from .tool_choice_function import ToolChoiceFunction
+from ..shared.responses_model import ResponsesModel
+
+__all__ = ["Response", "IncompleteDetails", "ToolChoice"]
+
+
+class IncompleteDetails(BaseModel):
+ reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
+ """The reason why the response is incomplete."""
+
+
+ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction]
+
+
+class Response(BaseModel):
+ id: str
+ """Unique identifier for this Response."""
+
+ created_at: float
+ """Unix timestamp (in seconds) of when this Response was created."""
+
+ error: Optional[ResponseError] = None
+ """An error object returned when the model fails to generate a Response."""
+
+ incomplete_details: Optional[IncompleteDetails] = None
+ """Details about why the response is incomplete."""
+
+ instructions: Optional[str] = None
+ """
+ Inserts a system (or developer) message as the first item in the model's
+ context.
+
+ When used along with `previous_response_id`, the instructions from a previous
+ response will not be carried over to the next response. This makes it simple
+ to swap out system (or developer) messages in new responses.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: ResponsesModel
+ """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+ OpenAI offers a wide range of models with different capabilities, performance
+ characteristics, and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+ """
+
+ object: Literal["response"]
+ """The object type of this resource - always set to `response`."""
+
+ output: List[ResponseOutputItem]
+ """An array of content items generated by the model.
+
+ - The length and order of items in the `output` array is dependent on the
+ model's response.
+ - Rather than accessing the first item in the `output` array and assuming it's
+ an `assistant` message with the content generated by the model, you might
+ consider using the `output_text` property where supported in SDKs.
+ """
+
+ parallel_tool_calls: bool
+ """Whether to allow the model to run tool calls in parallel."""
+
+ temperature: Optional[float] = None
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ tool_choice: ToolChoice
+ """
+ How the model should select which tool (or tools) to use when generating a
+ response. See the `tools` parameter to see how to specify which tools the model
+ can call.
+ """
+
+ tools: List[Tool]
+ """An array of tools the model may call while generating a response.
+
+ You can specify which tool to use by setting the `tool_choice` parameter.
+
+ The two categories of tools you can provide the model are:
+
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ capabilities, like
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ the model to call your own code. Learn more about
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
+ """
+
+ top_p: Optional[float] = None
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ max_output_tokens: Optional[int] = None
+ """
+ An upper bound for the number of tokens that can be generated for a response,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ previous_response_id: Optional[str] = None
+ """The unique ID of the previous response to the model.
+
+ Use this to create multi-turn conversations. Learn more about
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ """
+
+ reasoning: Optional[Reasoning] = None
+ """**o-series models only**
+
+ Configuration options for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ status: Optional[ResponseStatus] = None
+ """The status of the response generation.
+
+ One of `completed`, `failed`, `in_progress`, or `incomplete`.
+ """
+
+ text: Optional[ResponseTextConfig] = None
+ """Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ """
+
+ truncation: Optional[Literal["auto", "disabled"]] = None
+ """The truncation strategy to use for the model response.
+
+ - `auto`: If the context of this response and previous ones exceeds the model's
+ context window size, the model will truncate the response to fit the context
+ window by dropping input items in the middle of the conversation.
+ - `disabled` (default): If a model response will exceed the context window size
+ for a model, the request will fail with a 400 error.
+ """
+
+ usage: Optional[ResponseUsage] = None
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used.
+ """
+
+ user: Optional[str] = None
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
+
+ @property
+ def output_text(self) -> str:
+ """Convenience property that aggregates all `output_text` items from the `output`
+ list.
+
+ If no `output_text` content blocks exist, then an empty string is returned.
+ """
+ texts: List[str] = []
+ for output in self.output:
+ if output.type == "message":
+ for content in output.content:
+ if content.type == "output_text":
+ texts.append(content.text)
+
+ return "".join(texts)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py
new file mode 100644
index 00000000..f3d77fac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioDeltaEvent"]
+
+
+class ResponseAudioDeltaEvent(BaseModel):
+ delta: str
+ """A chunk of Base64 encoded response audio bytes."""
+
+ type: Literal["response.audio.delta"]
+ """The type of the event. Always `response.audio.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py
new file mode 100644
index 00000000..5654f8e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioDoneEvent"]
+
+
+class ResponseAudioDoneEvent(BaseModel):
+ type: Literal["response.audio.done"]
+ """The type of the event. Always `response.audio.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py
new file mode 100644
index 00000000..69b6660f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDeltaEvent"]
+
+
+class ResponseAudioTranscriptDeltaEvent(BaseModel):
+ delta: str
+ """The partial transcript of the audio response."""
+
+ type: Literal["response.audio.transcript.delta"]
+ """The type of the event. Always `response.audio.transcript.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py
new file mode 100644
index 00000000..1a20319f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDoneEvent"]
+
+
+class ResponseAudioTranscriptDoneEvent(BaseModel):
+ type: Literal["response.audio.transcript.done"]
+ """The type of the event. Always `response.audio.transcript.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py
new file mode 100644
index 00000000..7527238d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterCallCodeDeltaEvent"]
+
+
+class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
+ delta: str
+ """The partial code snippet added by the code interpreter."""
+
+ output_index: int
+ """The index of the output item that the code interpreter call is in progress."""
+
+ type: Literal["response.code_interpreter_call.code.delta"]
+ """The type of the event. Always `response.code_interpreter_call.code.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py
new file mode 100644
index 00000000..f84d4cf3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterCallCodeDoneEvent"]
+
+
+class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
+ code: str
+ """The final code snippet output by the code interpreter."""
+
+ output_index: int
+ """The index of the output item that the code interpreter call is in progress."""
+
+ type: Literal["response.code_interpreter_call.code.done"]
+ """The type of the event. Always `response.code_interpreter_call.code.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py
new file mode 100644
index 00000000..b0cb73fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallCompletedEvent"]
+
+
+class ResponseCodeInterpreterCallCompletedEvent(BaseModel):
+ code_interpreter_call: ResponseCodeInterpreterToolCall
+ """A tool call to run code."""
+
+ output_index: int
+ """The index of the output item that the code interpreter call is in progress."""
+
+ type: Literal["response.code_interpreter_call.completed"]
+ """The type of the event. Always `response.code_interpreter_call.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py
new file mode 100644
index 00000000..64b739f3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallInProgressEvent"]
+
+
+class ResponseCodeInterpreterCallInProgressEvent(BaseModel):
+ code_interpreter_call: ResponseCodeInterpreterToolCall
+ """A tool call to run code."""
+
+ output_index: int
+ """The index of the output item that the code interpreter call is in progress."""
+
+ type: Literal["response.code_interpreter_call.in_progress"]
+ """The type of the event. Always `response.code_interpreter_call.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py
new file mode 100644
index 00000000..3100eac1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallInterpretingEvent"]
+
+
+class ResponseCodeInterpreterCallInterpretingEvent(BaseModel):
+ code_interpreter_call: ResponseCodeInterpreterToolCall
+ """A tool call to run code."""
+
+ output_index: int
+ """The index of the output item that the code interpreter call is in progress."""
+
+ type: Literal["response.code_interpreter_call.interpreting"]
+ """The type of the event. Always `response.code_interpreter_call.interpreting`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py
new file mode 100644
index 00000000..d5a50570
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterToolCall", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"]
+
+
+class ResultLogs(BaseModel):
+ logs: str
+ """The logs of the code interpreter tool call."""
+
+ type: Literal["logs"]
+ """The type of the code interpreter text output. Always `logs`."""
+
+
+class ResultFilesFile(BaseModel):
+ file_id: str
+ """The ID of the file."""
+
+ mime_type: str
+ """The MIME type of the file."""
+
+
+class ResultFiles(BaseModel):
+ files: List[ResultFilesFile]
+
+ type: Literal["files"]
+ """The type of the code interpreter file output. Always `files`."""
+
+
+Result: TypeAlias = Annotated[Union[ResultLogs, ResultFiles], PropertyInfo(discriminator="type")]
+
+
+class ResponseCodeInterpreterToolCall(BaseModel):
+ id: str
+ """The unique ID of the code interpreter tool call."""
+
+ code: str
+ """The code to run."""
+
+ results: List[Result]
+ """The results of the code interpreter tool call."""
+
+ status: Literal["in_progress", "interpreting", "completed"]
+ """The status of the code interpreter tool call."""
+
+ type: Literal["code_interpreter_call"]
+ """The type of the code interpreter tool call. Always `code_interpreter_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py
new file mode 100644
index 00000000..a944f248
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseCompletedEvent"]
+
+
+class ResponseCompletedEvent(BaseModel):
+ response: Response
+ """Properties of the completed response."""
+
+ type: Literal["response.completed"]
+ """The type of the event. Always `response.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py
new file mode 100644
index 00000000..99483756
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py
@@ -0,0 +1,212 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+ "ResponseComputerToolCall",
+ "Action",
+ "ActionClick",
+ "ActionDoubleClick",
+ "ActionDrag",
+ "ActionDragPath",
+ "ActionKeypress",
+ "ActionMove",
+ "ActionScreenshot",
+ "ActionScroll",
+ "ActionType",
+ "ActionWait",
+ "PendingSafetyCheck",
+]
+
+
+class ActionClick(BaseModel):
+ button: Literal["left", "right", "wheel", "back", "forward"]
+ """Indicates which mouse button was pressed during the click.
+
+ One of `left`, `right`, `wheel`, `back`, or `forward`.
+ """
+
+ type: Literal["click"]
+ """Specifies the event type.
+
+ For a click action, this property is always set to `click`.
+ """
+
+ x: int
+ """The x-coordinate where the click occurred."""
+
+ y: int
+ """The y-coordinate where the click occurred."""
+
+
+class ActionDoubleClick(BaseModel):
+ type: Literal["double_click"]
+ """Specifies the event type.
+
+ For a double click action, this property is always set to `double_click`.
+ """
+
+ x: int
+ """The x-coordinate where the double click occurred."""
+
+ y: int
+ """The y-coordinate where the double click occurred."""
+
+
+class ActionDragPath(BaseModel):
+ x: int
+ """The x-coordinate."""
+
+ y: int
+ """The y-coordinate."""
+
+
+class ActionDrag(BaseModel):
+ path: List[ActionDragPath]
+ """An array of coordinates representing the path of the drag action.
+
+ Coordinates will appear as an array of objects, e.g.
+
+ ```
+ [
+ { x: 100, y: 200 },
+ { x: 200, y: 300 }
+ ]
+ ```
+ """
+
+ type: Literal["drag"]
+ """Specifies the event type.
+
+ For a drag action, this property is always set to `drag`.
+ """
+
+
+class ActionKeypress(BaseModel):
+ keys: List[str]
+ """The combination of keys the model is requesting to be pressed.
+
+ This is an array of strings, each representing a key.
+ """
+
+ type: Literal["keypress"]
+ """Specifies the event type.
+
+ For a keypress action, this property is always set to `keypress`.
+ """
+
+
+class ActionMove(BaseModel):
+ type: Literal["move"]
+ """Specifies the event type.
+
+ For a move action, this property is always set to `move`.
+ """
+
+ x: int
+ """The x-coordinate to move to."""
+
+ y: int
+ """The y-coordinate to move to."""
+
+
+class ActionScreenshot(BaseModel):
+ type: Literal["screenshot"]
+ """Specifies the event type.
+
+ For a screenshot action, this property is always set to `screenshot`.
+ """
+
+
+class ActionScroll(BaseModel):
+ scroll_x: int
+ """The horizontal scroll distance."""
+
+ scroll_y: int
+ """The vertical scroll distance."""
+
+ type: Literal["scroll"]
+ """Specifies the event type.
+
+ For a scroll action, this property is always set to `scroll`.
+ """
+
+ x: int
+ """The x-coordinate where the scroll occurred."""
+
+ y: int
+ """The y-coordinate where the scroll occurred."""
+
+
+class ActionType(BaseModel):
+ text: str
+ """The text to type."""
+
+ type: Literal["type"]
+ """Specifies the event type.
+
+ For a type action, this property is always set to `type`.
+ """
+
+
+class ActionWait(BaseModel):
+ type: Literal["wait"]
+ """Specifies the event type.
+
+ For a wait action, this property is always set to `wait`.
+ """
+
+
+Action: TypeAlias = Annotated[
+ Union[
+ ActionClick,
+ ActionDoubleClick,
+ ActionDrag,
+ ActionKeypress,
+ ActionMove,
+ ActionScreenshot,
+ ActionScroll,
+ ActionType,
+ ActionWait,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class PendingSafetyCheck(BaseModel):
+ id: str
+ """The ID of the pending safety check."""
+
+ code: str
+ """The type of the pending safety check."""
+
+ message: str
+ """Details about the pending safety check."""
+
+
+class ResponseComputerToolCall(BaseModel):
+ id: str
+ """The unique ID of the computer call."""
+
+ action: Action
+ """A click action."""
+
+ call_id: str
+ """An identifier used when responding to the tool call with output."""
+
+ pending_safety_checks: List[PendingSafetyCheck]
+ """The pending safety checks for the computer call."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ type: Literal["computer_call"]
+ """The type of the computer call. Always `computer_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py
new file mode 100644
index 00000000..a2dd68f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
+
+__all__ = ["ResponseComputerToolCallOutputItem", "AcknowledgedSafetyCheck"]
+
+
+class AcknowledgedSafetyCheck(BaseModel):
+ id: str
+ """The ID of the pending safety check."""
+
+ code: str
+ """The type of the pending safety check."""
+
+ message: str
+ """Details about the pending safety check."""
+
+
+class ResponseComputerToolCallOutputItem(BaseModel):
+ id: str
+ """The unique ID of the computer call tool output."""
+
+ call_id: str
+ """The ID of the computer tool call that produced the output."""
+
+ output: ResponseComputerToolCallOutputScreenshot
+ """A computer screenshot image used with the computer use tool."""
+
+ type: Literal["computer_call_output"]
+ """The type of the computer tool call output. Always `computer_call_output`."""
+
+ acknowledged_safety_checks: Optional[List[AcknowledgedSafetyCheck]] = None
+ """
+ The safety checks reported by the API that have been acknowledged by the
+ developer.
+ """
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the message input.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+ are returned via API.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py
new file mode 100644
index 00000000..a500da85
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseComputerToolCallOutputScreenshot"]
+
+
+class ResponseComputerToolCallOutputScreenshot(BaseModel):
+ type: Literal["computer_screenshot"]
+ """Specifies the event type.
+
+ For a computer screenshot, this property is always set to `computer_screenshot`.
+ """
+
+ file_id: Optional[str] = None
+ """The identifier of an uploaded file that contains the screenshot."""
+
+ image_url: Optional[str] = None
+ """The URL of the screenshot image."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py
new file mode 100644
index 00000000..efc2028a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseComputerToolCallOutputScreenshotParam"]
+
+
+class ResponseComputerToolCallOutputScreenshotParam(TypedDict, total=False):
+ type: Required[Literal["computer_screenshot"]]
+ """Specifies the event type.
+
+ For a computer screenshot, this property is always set to `computer_screenshot`.
+ """
+
+ file_id: str
+ """The identifier of an uploaded file that contains the screenshot."""
+
+ image_url: str
+ """The URL of the screenshot image."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py
new file mode 100644
index 00000000..d4ef56ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py
@@ -0,0 +1,208 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+ "ResponseComputerToolCallParam",
+ "Action",
+ "ActionClick",
+ "ActionDoubleClick",
+ "ActionDrag",
+ "ActionDragPath",
+ "ActionKeypress",
+ "ActionMove",
+ "ActionScreenshot",
+ "ActionScroll",
+ "ActionType",
+ "ActionWait",
+ "PendingSafetyCheck",
+]
+
+
+class ActionClick(TypedDict, total=False):
+ button: Required[Literal["left", "right", "wheel", "back", "forward"]]
+ """Indicates which mouse button was pressed during the click.
+
+ One of `left`, `right`, `wheel`, `back`, or `forward`.
+ """
+
+ type: Required[Literal["click"]]
+ """Specifies the event type.
+
+ For a click action, this property is always set to `click`.
+ """
+
+ x: Required[int]
+ """The x-coordinate where the click occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the click occurred."""
+
+
+class ActionDoubleClick(TypedDict, total=False):
+ type: Required[Literal["double_click"]]
+ """Specifies the event type.
+
+ For a double click action, this property is always set to `double_click`.
+ """
+
+ x: Required[int]
+ """The x-coordinate where the double click occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the double click occurred."""
+
+
+class ActionDragPath(TypedDict, total=False):
+ x: Required[int]
+ """The x-coordinate."""
+
+ y: Required[int]
+ """The y-coordinate."""
+
+
+class ActionDrag(TypedDict, total=False):
+ path: Required[Iterable[ActionDragPath]]
+ """An array of coordinates representing the path of the drag action.
+
+ Coordinates will appear as an array of objects, e.g.
+
+ ```
+ [
+ { x: 100, y: 200 },
+ { x: 200, y: 300 }
+ ]
+ ```
+ """
+
+ type: Required[Literal["drag"]]
+ """Specifies the event type.
+
+ For a drag action, this property is always set to `drag`.
+ """
+
+
+class ActionKeypress(TypedDict, total=False):
+ keys: Required[List[str]]
+ """The combination of keys the model is requesting to be pressed.
+
+ This is an array of strings, each representing a key.
+ """
+
+ type: Required[Literal["keypress"]]
+ """Specifies the event type.
+
+ For a keypress action, this property is always set to `keypress`.
+ """
+
+
+class ActionMove(TypedDict, total=False):
+ type: Required[Literal["move"]]
+ """Specifies the event type.
+
+ For a move action, this property is always set to `move`.
+ """
+
+ x: Required[int]
+ """The x-coordinate to move to."""
+
+ y: Required[int]
+ """The y-coordinate to move to."""
+
+
+class ActionScreenshot(TypedDict, total=False):
+ type: Required[Literal["screenshot"]]
+ """Specifies the event type.
+
+ For a screenshot action, this property is always set to `screenshot`.
+ """
+
+
+class ActionScroll(TypedDict, total=False):
+ scroll_x: Required[int]
+ """The horizontal scroll distance."""
+
+ scroll_y: Required[int]
+ """The vertical scroll distance."""
+
+ type: Required[Literal["scroll"]]
+ """Specifies the event type.
+
+ For a scroll action, this property is always set to `scroll`.
+ """
+
+ x: Required[int]
+ """The x-coordinate where the scroll occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the scroll occurred."""
+
+
+class ActionType(TypedDict, total=False):
+ text: Required[str]
+ """The text to type."""
+
+ type: Required[Literal["type"]]
+ """Specifies the event type.
+
+ For a type action, this property is always set to `type`.
+ """
+
+
+class ActionWait(TypedDict, total=False):
+ type: Required[Literal["wait"]]
+ """Specifies the event type.
+
+ For a wait action, this property is always set to `wait`.
+ """
+
+
+Action: TypeAlias = Union[
+ ActionClick,
+ ActionDoubleClick,
+ ActionDrag,
+ ActionKeypress,
+ ActionMove,
+ ActionScreenshot,
+ ActionScroll,
+ ActionType,
+ ActionWait,
+]
+
+
+class PendingSafetyCheck(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the pending safety check."""
+
+ code: Required[str]
+ """The type of the pending safety check."""
+
+ message: Required[str]
+ """Details about the pending safety check."""
+
+
+class ResponseComputerToolCallParam(TypedDict, total=False):
+ id: Required[str]
+ """The unique ID of the computer call."""
+
+ action: Required[Action]
+ """A click action."""
+
+ call_id: Required[str]
+ """An identifier used when responding to the tool call with output."""
+
+ pending_safety_checks: Required[Iterable[PendingSafetyCheck]]
+ """The pending safety checks for the computer call."""
+
+ status: Required[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ type: Required[Literal["computer_call"]]
+ """The type of the computer call. Always `computer_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py
new file mode 100644
index 00000000..93f5ec4b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseContentPartAddedEvent", "Part"]
+
+Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseContentPartAddedEvent(BaseModel):
+ content_index: int
+ """The index of the content part that was added."""
+
+ item_id: str
+ """The ID of the output item that the content part was added to."""
+
+ output_index: int
+ """The index of the output item that the content part was added to."""
+
+ part: Part
+ """The content part that was added."""
+
+ type: Literal["response.content_part.added"]
+ """The type of the event. Always `response.content_part.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py
new file mode 100644
index 00000000..4ec07398
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseContentPartDoneEvent", "Part"]
+
+Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseContentPartDoneEvent(BaseModel):
+ content_index: int
+ """The index of the content part that is done."""
+
+ item_id: str
+ """The ID of the output item that the content part was added to."""
+
+ output_index: int
+ """The index of the output item that the content part was added to."""
+
+ part: Part
+ """The content part that is done."""
+
+ type: Literal["response.content_part.done"]
+ """The type of the event. Always `response.content_part.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py
new file mode 100644
index 00000000..651050c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py
@@ -0,0 +1,204 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .tool_param import ToolParam
+from .response_includable import ResponseIncludable
+from .tool_choice_options import ToolChoiceOptions
+from .response_input_param import ResponseInputParam
+from ..shared_params.metadata import Metadata
+from .tool_choice_types_param import ToolChoiceTypesParam
+from ..shared_params.reasoning import Reasoning
+from .response_text_config_param import ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam
+from ..shared_params.responses_model import ResponsesModel
+
+__all__ = [
+ "ResponseCreateParamsBase",
+ "ToolChoice",
+ "ResponseCreateParamsNonStreaming",
+ "ResponseCreateParamsStreaming",
+]
+
+
+class ResponseCreateParamsBase(TypedDict, total=False):
+ input: Required[Union[str, ResponseInputParam]]
+ """Text, image, or file inputs to the model, used to generate a response.
+
+ Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+ """
+
+ model: Required[ResponsesModel]
+ """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+ OpenAI offers a wide range of models with different capabilities, performance
+ characteristics, and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+ """
+
+ include: Optional[List[ResponseIncludable]]
+ """Specify additional output data to include in the model response.
+
+ Currently supported values are:
+
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image URLs from the input message.
+ - `computer_call_output.output.image_url`: Include image URLs from the computer
+ call output.
+ """
+
+ instructions: Optional[str]
+ """
+ Inserts a system (or developer) message as the first item in the model's
+ context.
+
+ When used along with `previous_response_id`, the instructions from a previous
+ response will not be carried over to the next response. This makes it simple
+ to swap out system (or developer) messages in new responses.
+ """
+
+ max_output_tokens: Optional[int]
+ """
+ An upper bound for the number of tokens that can be generated for a response,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ parallel_tool_calls: Optional[bool]
+ """Whether to allow the model to run tool calls in parallel."""
+
+ previous_response_id: Optional[str]
+ """The unique ID of the previous response to the model.
+
+ Use this to create multi-turn conversations. Learn more about
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ """
+
+ reasoning: Optional[Reasoning]
+ """**o-series models only**
+
+ Configuration options for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ store: Optional[bool]
+ """Whether to store the generated model response for later retrieval via API."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ text: ResponseTextConfigParam
+ """Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ """
+
+ tool_choice: ToolChoice
+ """
+ How the model should select which tool (or tools) to use when generating a
+ response. See the `tools` parameter to see how to specify which tools the model
+ can call.
+ """
+
+ tools: Iterable[ToolParam]
+ """An array of tools the model may call while generating a response.
+
+ You can specify which tool to use by setting the `tool_choice` parameter.
+
+ The two categories of tools you can provide the model are:
+
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ capabilities, like
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ the model to call your own code. Learn more about
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ truncation: Optional[Literal["auto", "disabled"]]
+ """The truncation strategy to use for the model response.
+
+ - `auto`: If the context of this response and previous ones exceeds the model's
+ context window size, the model will truncate the response to fit the context
+ window by dropping input items in the middle of the conversation.
+ - `disabled` (default): If a model response will exceed the context window size
+ for a model, the request will fail with a 400 error.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ """
+
+
+ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypesParam, ToolChoiceFunctionParam]
+
+
+class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+ """
+
+
+class ResponseCreateParamsStreaming(ResponseCreateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+ """
+
+
+ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming]
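The two variants exist so `responses.create` can be overloaded on `stream`: omitting it (or passing `False`) yields a complete `Response`, while `stream=True` switches the return type to an iterator of server-sent events. A sketch (model and prompts illustrative):

```
from openai import OpenAI

client = OpenAI()

# Non-streaming: a complete Response comes back.
resp = client.responses.create(model="gpt-4o", input="Hi")
print(resp.output_text)

# Streaming: an event iterator comes back instead.
for event in client.responses.create(model="gpt-4o", input="Hi", stream=True):
    if event.type == "response.output_text.delta":
        print(event.delta, end="")
```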
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py
new file mode 100644
index 00000000..7a524cec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseCreatedEvent"]
+
+
+class ResponseCreatedEvent(BaseModel):
+ response: Response
+ """The response that was created."""
+
+ type: Literal["response.created"]
+ """The type of the event. Always `response.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_error.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error.py
new file mode 100644
index 00000000..90f1fcf5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseError"]
+
+
+class ResponseError(BaseModel):
+ code: Literal[
+ "server_error",
+ "rate_limit_exceeded",
+ "invalid_prompt",
+ "vector_store_timeout",
+ "invalid_image",
+ "invalid_image_format",
+ "invalid_base64_image",
+ "invalid_image_url",
+ "image_too_large",
+ "image_too_small",
+ "image_parse_error",
+ "image_content_policy_violation",
+ "invalid_image_mode",
+ "image_file_too_large",
+ "unsupported_image_media_type",
+ "empty_image_file",
+ "failed_to_download_image",
+ "image_file_not_found",
+ ]
+ """The error code for the response."""
+
+ message: str
+ """A human-readable description of the error."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py
new file mode 100644
index 00000000..1b7e605d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseErrorEvent"]
+
+
+class ResponseErrorEvent(BaseModel):
+ code: Optional[str] = None
+ """The error code."""
+
+ message: str
+ """The error message."""
+
+ param: Optional[str] = None
+ """The error parameter."""
+
+ type: Literal["error"]
+ """The type of the event. Always `error`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py
new file mode 100644
index 00000000..3e8f75d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseFailedEvent"]
+
+
+class ResponseFailedEvent(BaseModel):
+ response: Response
+ """The response that failed."""
+
+ type: Literal["response.failed"]
+ """The type of the event. Always `response.failed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py
new file mode 100644
index 00000000..4b860833
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallCompletedEvent"]
+
+
+class ResponseFileSearchCallCompletedEvent(BaseModel):
+ item_id: str
+ """The ID of the output item that the file search call is initiated."""
+
+ output_index: int
+ """The index of the output item that the file search call is initiated."""
+
+ type: Literal["response.file_search_call.completed"]
+ """The type of the event. Always `response.file_search_call.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py
new file mode 100644
index 00000000..eb42e3da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallInProgressEvent"]
+
+
+class ResponseFileSearchCallInProgressEvent(BaseModel):
+ item_id: str
+ """The ID of the output item that the file search call is initiated."""
+
+ output_index: int
+ """The index of the output item that the file search call is initiated."""
+
+ type: Literal["response.file_search_call.in_progress"]
+ """The type of the event. Always `response.file_search_call.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py
new file mode 100644
index 00000000..3cd8905d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallSearchingEvent"]
+
+
+class ResponseFileSearchCallSearchingEvent(BaseModel):
+ item_id: str
+ """The ID of the output item that the file search call is initiated."""
+
+ output_index: int
+ """The index of the output item that the file search call is searching."""
+
+ type: Literal["response.file_search_call.searching"]
+ """The type of the event. Always `response.file_search_call.searching`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py
new file mode 100644
index 00000000..ef1c6a56
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchToolCall", "Result"]
+
+
+class Result(BaseModel):
+ attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ file_id: Optional[str] = None
+ """The unique ID of the file."""
+
+ filename: Optional[str] = None
+ """The name of the file."""
+
+ score: Optional[float] = None
+ """The relevance score of the file - a value between 0 and 1."""
+
+ text: Optional[str] = None
+ """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCall(BaseModel):
+ id: str
+ """The unique ID of the file search tool call."""
+
+ queries: List[str]
+ """The queries used to search for files."""
+
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
+ """The status of the file search tool call.
+
+ One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+ """
+
+ type: Literal["file_search_call"]
+ """The type of the file search tool call. Always `file_search_call`."""
+
+ results: Optional[List[Result]] = None
+ """The results of the file search tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py
new file mode 100644
index 00000000..9a4177cf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFileSearchToolCallParam", "Result"]
+
+
+class Result(TypedDict, total=False):
+ attributes: Optional[Dict[str, Union[str, float, bool]]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ file_id: str
+ """The unique ID of the file."""
+
+ filename: str
+ """The name of the file."""
+
+ score: float
+ """The relevance score of the file - a value between 0 and 1."""
+
+ text: str
+ """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCallParam(TypedDict, total=False):
+ id: Required[str]
+ """The unique ID of the file search tool call."""
+
+ queries: Required[List[str]]
+ """The queries used to search for files."""
+
+ status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
+ """The status of the file search tool call.
+
+ One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+ """
+
+ type: Required[Literal["file_search_call"]]
+ """The type of the file search tool call. Always `file_search_call`."""
+
+ results: Optional[Iterable[Result]]
+ """The results of the file search tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py
new file mode 100644
index 00000000..a4896bf9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..shared.response_format_text import ResponseFormatText
+from ..shared.response_format_json_object import ResponseFormatJSONObject
+from .response_format_text_json_schema_config import ResponseFormatTextJSONSchemaConfig
+
+__all__ = ["ResponseFormatTextConfig"]
+
+ResponseFormatTextConfig: TypeAlias = Annotated[
+ Union[ResponseFormatText, ResponseFormatTextJSONSchemaConfig, ResponseFormatJSONObject],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py
new file mode 100644
index 00000000..fcaf8f3f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from ..shared_params.response_format_text import ResponseFormatText
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from .response_format_text_json_schema_config_param import ResponseFormatTextJSONSchemaConfigParam
+
+__all__ = ["ResponseFormatTextConfigParam"]
+
+ResponseFormatTextConfigParam: TypeAlias = Union[
+ ResponseFormatText, ResponseFormatTextJSONSchemaConfigParam, ResponseFormatJSONObject
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py
new file mode 100644
index 00000000..3cf06637
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatTextJSONSchemaConfig"]
+
+
+class ResponseFormatTextJSONSchemaConfig(BaseModel):
+ schema_: Dict[str, object] = FieldInfo(alias="schema")
+ """
+ The schema for the response format, described as a JSON Schema object. Learn how
+ to build JSON schemas [here](https://json-schema.org/).
+ """
+
+ type: Literal["json_schema"]
+ """The type of response format being defined. Always `json_schema`."""
+
+ description: Optional[str] = None
+ """
+ A description of what the response format is for, used by the model to determine
+ how to respond in the format.
+ """
+
+ name: Optional[str] = None
+ """The name of the response format.
+
+ Must contain only a-z, A-Z, 0-9, underscores, and dashes, with a maximum length
+ of 64.
+ """
+
+ strict: Optional[bool] = None
+ """
+ Whether to enable strict schema adherence when generating the output. If set to
+ true, the model will always follow the exact schema defined in the `schema`
+ field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+ learn more, read the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py
new file mode 100644
index 00000000..211c5d1e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatTextJSONSchemaConfigParam"]
+
+
+class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False):
+ schema: Required[Dict[str, object]]
+ """
+ The schema for the response format, described as a JSON Schema object. Learn how
+ to build JSON schemas [here](https://json-schema.org/).
+ """
+
+ type: Required[Literal["json_schema"]]
+ """The type of response format being defined. Always `json_schema`."""
+
+ description: str
+ """
+ A description of what the response format is for, used by the model to determine
+ how to respond in the format.
+ """
+
+ name: str
+ """The name of the response format.
+
+ Must contain only a-z, A-Z, 0-9, underscores, and dashes, with a maximum length
+ of 64.
+ """
+
+ strict: Optional[bool]
+ """
+ Whether to enable strict schema adherence when generating the output. If set to
+ true, the model will always follow the exact schema defined in the `schema`
+ field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+ learn more, read the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py
new file mode 100644
index 00000000..0989b7ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"]
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
+ delta: str
+ """The function-call arguments delta that is added."""
+
+ item_id: str
+ """The ID of the output item that the function-call arguments delta is added to."""
+
+ output_index: int
+ """
+ The index of the output item that the function-call arguments delta is added to.
+ """
+
+ type: Literal["response.function_call_arguments.delta"]
+ """The type of the event. Always `response.function_call_arguments.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py
new file mode 100644
index 00000000..1d805a57
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDoneEvent"]
+
+
+class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
+ arguments: str
+ """The function-call arguments."""
+
+ item_id: str
+ """The ID of the item."""
+
+ output_index: int
+ """The index of the output item."""
+
+ type: Literal["response.function_call_arguments.done"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py
new file mode 100644
index 00000000..2a848220
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionToolCall"]
+
+
+class ResponseFunctionToolCall(BaseModel):
+ arguments: str
+ """A JSON string of the arguments to pass to the function."""
+
+ call_id: str
+ """The unique ID of the function tool call generated by the model."""
+
+ name: str
+ """The name of the function to run."""
+
+ type: Literal["function_call"]
+ """The type of the function tool call. Always `function_call`."""
+
+ id: Optional[str] = None
+ """The unique ID of the function tool call."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py
new file mode 100644
index 00000000..25984f94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .response_function_tool_call import ResponseFunctionToolCall
+
+__all__ = ["ResponseFunctionToolCallItem"]
+
+
+class ResponseFunctionToolCallItem(ResponseFunctionToolCall):
+ id: str # type: ignore
+ """The unique ID of the function tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py
new file mode 100644
index 00000000..4c8c41a6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionToolCallOutputItem"]
+
+
+class ResponseFunctionToolCallOutputItem(BaseModel):
+ id: str
+ """The unique ID of the function call tool output."""
+
+ call_id: str
+ """The unique ID of the function tool call generated by the model."""
+
+ output: str
+ """A JSON string of the output of the function tool call."""
+
+ type: Literal["function_call_output"]
+ """The type of the function tool call output. Always `function_call_output`."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py
new file mode 100644
index 00000000..eaa263cf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFunctionToolCallParam"]
+
+
+class ResponseFunctionToolCallParam(TypedDict, total=False):
+ arguments: Required[str]
+ """A JSON string of the arguments to pass to the function."""
+
+ call_id: Required[str]
+ """The unique ID of the function tool call generated by the model."""
+
+ name: Required[str]
+ """The name of the function to run."""
+
+ type: Required[Literal["function_call"]]
+ """The type of the function tool call. Always `function_call`."""
+
+ id: str
+ """The unique ID of the function tool call."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py
new file mode 100644
index 00000000..44734b68
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionWebSearch"]
+
+
+class ResponseFunctionWebSearch(BaseModel):
+ id: str
+ """The unique ID of the web search tool call."""
+
+ status: Literal["in_progress", "searching", "completed", "failed"]
+ """The status of the web search tool call."""
+
+ type: Literal["web_search_call"]
+ """The type of the web search tool call. Always `web_search_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py
new file mode 100644
index 00000000..d413e60b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFunctionWebSearchParam"]
+
+
+class ResponseFunctionWebSearchParam(TypedDict, total=False):
+ id: Required[str]
+ """The unique ID of the web search tool call."""
+
+ status: Required[Literal["in_progress", "searching", "completed", "failed"]]
+ """The status of the web search tool call."""
+
+ type: Required[Literal["web_search_call"]]
+ """The type of the web search tool call. Always `web_search_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py
new file mode 100644
index 00000000..7d96cbb8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseInProgressEvent"]
+
+
+class ResponseInProgressEvent(BaseModel):
+ response: Response
+ """The response that is in progress."""
+
+ type: Literal["response.in_progress"]
+ """The type of the event. Always `response.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py
new file mode 100644
index 00000000..83489fa7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ResponseIncludable"]
+
+ResponseIncludable: TypeAlias = Literal[
+ "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url"
+]
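
These `include` values are how optional payloads such as the `Result` lists above get populated. A sketch of opting in at creation time, assuming `client` is an `openai.OpenAI` instance; the vector store ID is illustrative:

    # Sketch: ask for file search results to be inlined in the response.
    response = client.responses.create(
        model="gpt-4o",
        input="What does the Q3 report say about revenue?",
        tools=[{"type": "file_search", "vector_store_ids": ["vs_123"]}],
        include=["file_search_call.results"],  # a ResponseIncludable value
    )
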
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py
new file mode 100644
index 00000000..742b789c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseIncompleteEvent"]
+
+
+class ResponseIncompleteEvent(BaseModel):
+ response: Response
+ """The response that was incomplete."""
+
+ type: Literal["response.incomplete"]
+ """The type of the event. Always `response.incomplete`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py
new file mode 100644
index 00000000..1726909a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_input_file import ResponseInputFile
+from .response_input_text import ResponseInputText
+from .response_input_image import ResponseInputImage
+
+__all__ = ["ResponseInputContent"]
+
+ResponseInputContent: TypeAlias = Annotated[
+ Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py
new file mode 100644
index 00000000..7791cdfd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .response_input_file_param import ResponseInputFileParam
+from .response_input_text_param import ResponseInputTextParam
+from .response_input_image_param import ResponseInputImageParam
+
+__all__ = ["ResponseInputContentParam"]
+
+ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py
new file mode 100644
index 00000000..00b35dc8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputFile"]
+
+
+class ResponseInputFile(BaseModel):
+ type: Literal["input_file"]
+ """The type of the input item. Always `input_file`."""
+
+ file_data: Optional[str] = None
+ """The content of the file to be sent to the model."""
+
+ file_id: Optional[str] = None
+ """The ID of the file to be sent to the model."""
+
+ filename: Optional[str] = None
+ """The name of the file to be sent to the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py
new file mode 100644
index 00000000..dc06a4ea
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputFileParam"]
+
+
+class ResponseInputFileParam(TypedDict, total=False):
+ type: Required[Literal["input_file"]]
+ """The type of the input item. Always `input_file`."""
+
+ file_data: str
+ """The content of the file to be sent to the model."""
+
+ file_id: str
+ """The ID of the file to be sent to the model."""
+
+ filename: str
+ """The name of the file to be sent to the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py
new file mode 100644
index 00000000..d719f44e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputImage"]
+
+
+class ResponseInputImage(BaseModel):
+ detail: Literal["high", "low", "auto"]
+ """The detail level of the image to be sent to the model.
+
+ One of `high`, `low`, or `auto`. Defaults to `auto`.
+ """
+
+ type: Literal["input_image"]
+ """The type of the input item. Always `input_image`."""
+
+ file_id: Optional[str] = None
+ """The ID of the file to be sent to the model."""
+
+ image_url: Optional[str] = None
+ """The URL of the image to be sent to the model.
+
+ Either a fully qualified URL or a base64-encoded image in a data URL.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py
new file mode 100644
index 00000000..5dd4db2b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputImageParam"]
+
+
+class ResponseInputImageParam(TypedDict, total=False):
+ detail: Required[Literal["high", "low", "auto"]]
+ """The detail level of the image to be sent to the model.
+
+ One of `high`, `low`, or `auto`. Defaults to `auto`.
+ """
+
+ type: Required[Literal["input_image"]]
+ """The type of the input item. Always `input_image`."""
+
+ file_id: Optional[str]
+ """The ID of the file to be sent to the model."""
+
+ image_url: Optional[str]
+ """The URL of the image to be sent to the model.
+
+ Either a fully qualified URL or a base64-encoded image in a data URL.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py
new file mode 100644
index 00000000..2505f7c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py
@@ -0,0 +1,131 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .easy_input_message_param import EasyInputMessageParam
+from .response_output_message_param import ResponseOutputMessageParam
+from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_computer_tool_call_param import ResponseComputerToolCallParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
+
+__all__ = [
+ "ResponseInputItemParam",
+ "Message",
+ "ComputerCallOutput",
+ "ComputerCallOutputAcknowledgedSafetyCheck",
+ "FunctionCallOutput",
+ "ItemReference",
+]
+
+
+class Message(TypedDict, total=False):
+ content: Required[ResponseInputMessageContentListParam]
+ """
+ A list of one or more input items to the model, containing different content
+ types.
+ """
+
+ role: Required[Literal["user", "system", "developer"]]
+ """The role of the message input. One of `user`, `system`, or `developer`."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always set to `message`."""
+
+
+class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the pending safety check."""
+
+ code: Required[str]
+ """The type of the pending safety check."""
+
+ message: Required[str]
+ """Details about the pending safety check."""
+
+
+class ComputerCallOutput(TypedDict, total=False):
+ call_id: Required[str]
+ """The ID of the computer tool call that produced the output."""
+
+ output: Required[ResponseComputerToolCallOutputScreenshotParam]
+ """A computer screenshot image used with the computer use tool."""
+
+ type: Required[Literal["computer_call_output"]]
+ """The type of the computer tool call output. Always `computer_call_output`."""
+
+ id: str
+ """The ID of the computer tool call output."""
+
+ acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+ """
+ The safety checks reported by the API that have been acknowledged by the
+ developer.
+ """
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the message input.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+ are returned via API.
+ """
+
+
+class FunctionCallOutput(TypedDict, total=False):
+ call_id: Required[str]
+ """The unique ID of the function tool call generated by the model."""
+
+ output: Required[str]
+ """A JSON string of the output of the function tool call."""
+
+ type: Required[Literal["function_call_output"]]
+ """The type of the function tool call output. Always `function_call_output`."""
+
+ id: str
+ """The unique ID of the function tool call output.
+
+ Populated when this item is returned via API.
+ """
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+
+class ItemReference(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the item to reference."""
+
+ type: Required[Literal["item_reference"]]
+ """The type of item to reference. Always `item_reference`."""
+
+
+ResponseInputItemParam: TypeAlias = Union[
+ EasyInputMessageParam,
+ Message,
+ ResponseOutputMessageParam,
+ ResponseFileSearchToolCallParam,
+ ResponseComputerToolCallParam,
+ ComputerCallOutput,
+ ResponseFunctionWebSearchParam,
+ ResponseFunctionToolCallParam,
+ FunctionCallOutput,
+ ResponseReasoningItemParam,
+ ItemReference,
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py
new file mode 100644
index 00000000..99b7c10f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .response_input_content import ResponseInputContent
+
+__all__ = ["ResponseInputMessageContentList"]
+
+ResponseInputMessageContentList: TypeAlias = List[ResponseInputContent]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py
new file mode 100644
index 00000000..080613df
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import TypeAlias
+
+from .response_input_file_param import ResponseInputFileParam
+from .response_input_text_param import ResponseInputTextParam
+from .response_input_image_param import ResponseInputImageParam
+
+__all__ = ["ResponseInputMessageContentListParam", "ResponseInputContentParam"]
+
+ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+
+ResponseInputMessageContentListParam: TypeAlias = List[ResponseInputContentParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py
new file mode 100644
index 00000000..6a788e7f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_input_message_content_list import ResponseInputMessageContentList
+
+__all__ = ["ResponseInputMessageItem"]
+
+
+class ResponseInputMessageItem(BaseModel):
+ id: str
+ """The unique ID of the message input."""
+
+ content: ResponseInputMessageContentList
+ """
+ A list of one or more input items to the model, containing different content
+ types.
+ """
+
+ role: Literal["user", "system", "developer"]
+ """The role of the message input. One of `user`, `system`, or `developer`."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always set to `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py
new file mode 100644
index 00000000..84a80eb7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py
@@ -0,0 +1,134 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .easy_input_message_param import EasyInputMessageParam
+from .response_output_message_param import ResponseOutputMessageParam
+from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_computer_tool_call_param import ResponseComputerToolCallParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
+
+__all__ = [
+ "ResponseInputParam",
+ "ResponseInputItemParam",
+ "Message",
+ "ComputerCallOutput",
+ "ComputerCallOutputAcknowledgedSafetyCheck",
+ "FunctionCallOutput",
+ "ItemReference",
+]
+
+
+class Message(TypedDict, total=False):
+ content: Required[ResponseInputMessageContentListParam]
+ """
+ A list of one or more input items to the model, containing different content
+ types.
+ """
+
+ role: Required[Literal["user", "system", "developer"]]
+ """The role of the message input. One of `user`, `system`, or `developer`."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always set to `message`."""
+
+
+class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the pending safety check."""
+
+ code: Required[str]
+ """The type of the pending safety check."""
+
+ message: Required[str]
+ """Details about the pending safety check."""
+
+
+class ComputerCallOutput(TypedDict, total=False):
+ call_id: Required[str]
+ """The ID of the computer tool call that produced the output."""
+
+ output: Required[ResponseComputerToolCallOutputScreenshotParam]
+ """A computer screenshot image used with the computer use tool."""
+
+ type: Required[Literal["computer_call_output"]]
+ """The type of the computer tool call output. Always `computer_call_output`."""
+
+ id: str
+ """The ID of the computer tool call output."""
+
+ acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+ """
+ The safety checks reported by the API that have been acknowledged by the
+ developer.
+ """
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the message input.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+ are returned via API.
+ """
+
+
+class FunctionCallOutput(TypedDict, total=False):
+ call_id: Required[str]
+ """The unique ID of the function tool call generated by the model."""
+
+ output: Required[str]
+ """A JSON string of the output of the function tool call."""
+
+ type: Required[Literal["function_call_output"]]
+ """The type of the function tool call output. Always `function_call_output`."""
+
+ id: str
+ """The unique ID of the function tool call output.
+
+ Populated when this item is returned via API.
+ """
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+
+class ItemReference(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the item to reference."""
+
+ type: Required[Literal["item_reference"]]
+ """The type of item to reference. Always `item_reference`."""
+
+
+ResponseInputItemParam: TypeAlias = Union[
+ EasyInputMessageParam,
+ Message,
+ ResponseOutputMessageParam,
+ ResponseFileSearchToolCallParam,
+ ResponseComputerToolCallParam,
+ ComputerCallOutput,
+ ResponseFunctionWebSearchParam,
+ ResponseFunctionToolCallParam,
+ FunctionCallOutput,
+ ResponseReasoningItemParam,
+ ItemReference,
+]
+
+ResponseInputParam: TypeAlias = List[ResponseInputItemParam]
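
With the alias in place, `input` can mix free-form messages with any of the structured items in the union. A sketch of a two-item conversation, assuming `client` is an `openai.OpenAI` instance:

    # Sketch: a ResponseInputParam list mixing an easy message with a
    # structured one.
    from openai.types.responses.response_input_param import ResponseInputParam

    conversation: ResponseInputParam = [
        {"role": "developer", "content": "Answer tersely."},  # EasyInputMessageParam
        {
            "type": "message",
            "role": "user",
            "content": [{"type": "input_text", "text": "Summarize the attached file."}],
        },
    ]
    response = client.responses.create(model="gpt-4o", input=conversation)
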
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py
new file mode 100644
index 00000000..ba8d1ea1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputText"]
+
+
+class ResponseInputText(BaseModel):
+ text: str
+ """The text input to the model."""
+
+ type: Literal["input_text"]
+ """The type of the input item. Always `input_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py
new file mode 100644
index 00000000..f2ba8340
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputTextParam"]
+
+
+class ResponseInputTextParam(TypedDict, total=False):
+ text: Required[str]
+ """The text input to the model."""
+
+ type: Required[Literal["input_text"]]
+ """The type of the input item. Always `input_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item.py
new file mode 100644
index 00000000..dc8d67d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_output_message import ResponseOutputMessage
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_input_message_item import ResponseInputMessageItem
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_function_tool_call_item import ResponseFunctionToolCallItem
+from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
+
+__all__ = ["ResponseItem"]
+
+ResponseItem: TypeAlias = Annotated[
+ Union[
+ ResponseInputMessageItem,
+ ResponseOutputMessage,
+ ResponseFileSearchToolCall,
+ ResponseComputerToolCall,
+ ResponseComputerToolCallOutputItem,
+ ResponseFunctionWebSearch,
+ ResponseFunctionToolCallItem,
+ ResponseFunctionToolCallOutputItem,
+ ],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py
new file mode 100644
index 00000000..b43eacdb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_item import ResponseItem
+
+__all__ = ["ResponseItemList"]
+
+
+class ResponseItemList(BaseModel):
+ data: List[ResponseItem]
+ """A list of items used to generate this response."""
+
+ first_id: str
+ """The ID of the first item in the list."""
+
+ has_more: bool
+ """Whether there are more items available."""
+
+ last_id: str
+ """The ID of the last item in the list."""
+
+ object: Literal["list"]
+ """The type of object returned, must be `list`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py
new file mode 100644
index 00000000..f1e96931
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_output_message import ResponseOutputMessage
+from .response_reasoning_item import ResponseReasoningItem
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_function_tool_call import ResponseFunctionToolCall
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+
+__all__ = ["ResponseOutputItem"]
+
+ResponseOutputItem: TypeAlias = Annotated[
+ Union[
+ ResponseOutputMessage,
+ ResponseFileSearchToolCall,
+ ResponseFunctionToolCall,
+ ResponseFunctionWebSearch,
+ ResponseComputerToolCall,
+ ResponseReasoningItem,
+ ],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py
new file mode 100644
index 00000000..7344fb9a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_output_item import ResponseOutputItem
+
+__all__ = ["ResponseOutputItemAddedEvent"]
+
+
+class ResponseOutputItemAddedEvent(BaseModel):
+ item: ResponseOutputItem
+ """The output item that was added."""
+
+ output_index: int
+ """The index of the output item that was added."""
+
+ type: Literal["response.output_item.added"]
+ """The type of the event. Always `response.output_item.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py
new file mode 100644
index 00000000..a0a871a0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_output_item import ResponseOutputItem
+
+__all__ = ["ResponseOutputItemDoneEvent"]
+
+
+class ResponseOutputItemDoneEvent(BaseModel):
+ item: ResponseOutputItem
+ """The output item that was marked done."""
+
+ output_index: int
+ """The index of the output item that was marked done."""
+
+ type: Literal["response.output_item.done"]
+ """The type of the event. Always `response.output_item.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py
new file mode 100644
index 00000000..3864aa21
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseOutputMessage", "Content"]
+
+Content: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseOutputMessage(BaseModel):
+ id: str
+ """The unique ID of the output message."""
+
+ content: List[Content]
+ """The content of the output message."""
+
+ role: Literal["assistant"]
+ """The role of the output message. Always `assistant`."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the message input.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+ are returned via API.
+ """
+
+ type: Literal["message"]
+ """The type of the output message. Always `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py
new file mode 100644
index 00000000..46cbbd20
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .response_output_text_param import ResponseOutputTextParam
+from .response_output_refusal_param import ResponseOutputRefusalParam
+
+__all__ = ["ResponseOutputMessageParam", "Content"]
+
+Content: TypeAlias = Union[ResponseOutputTextParam, ResponseOutputRefusalParam]
+
+
+class ResponseOutputMessageParam(TypedDict, total=False):
+ id: Required[str]
+ """The unique ID of the output message."""
+
+ content: Required[Iterable[Content]]
+ """The content of the output message."""
+
+ role: Required[Literal["assistant"]]
+ """The role of the output message. Always `assistant`."""
+
+ status: Required[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the message input.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+ are returned via API.
+ """
+
+ type: Required[Literal["message"]]
+ """The type of the output message. Always `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py
new file mode 100644
index 00000000..eba58107
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseOutputRefusal"]
+
+
+class ResponseOutputRefusal(BaseModel):
+ refusal: str
+ """The refusal explanationfrom the model."""
+
+ type: Literal["refusal"]
+ """The type of the refusal. Always `refusal`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py
new file mode 100644
index 00000000..53140a60
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseOutputRefusalParam"]
+
+
+class ResponseOutputRefusalParam(TypedDict, total=False):
+ refusal: Required[str]
+ """The refusal explanationfrom the model."""
+
+ type: Required[Literal["refusal"]]
+ """The type of the refusal. Always `refusal`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py
new file mode 100644
index 00000000..fa653cd1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py
@@ -0,0 +1,64 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["ResponseOutputText", "Annotation", "AnnotationFileCitation", "AnnotationURLCitation", "AnnotationFilePath"]
+
+
+class AnnotationFileCitation(BaseModel):
+ file_id: str
+ """The ID of the file."""
+
+ index: int
+ """The index of the file in the list of files."""
+
+ type: Literal["file_citation"]
+ """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(BaseModel):
+ end_index: int
+ """The index of the last character of the URL citation in the message."""
+
+ start_index: int
+ """The index of the first character of the URL citation in the message."""
+
+ title: str
+ """The title of the web resource."""
+
+ type: Literal["url_citation"]
+ """The type of the URL citation. Always `url_citation`."""
+
+ url: str
+ """The URL of the web resource."""
+
+
+class AnnotationFilePath(BaseModel):
+ file_id: str
+ """The ID of the file."""
+
+ index: int
+ """The index of the file in the list of files."""
+
+ type: Literal["file_path"]
+ """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Annotated[
+ Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+]
+
+
+class ResponseOutputText(BaseModel):
+ annotations: List[Annotation]
+ """The annotations of the text output."""
+
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py
new file mode 100644
index 00000000..1f096728
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+ "ResponseOutputTextParam",
+ "Annotation",
+ "AnnotationFileCitation",
+ "AnnotationURLCitation",
+ "AnnotationFilePath",
+]
+
+
+class AnnotationFileCitation(TypedDict, total=False):
+ file_id: Required[str]
+ """The ID of the file."""
+
+ index: Required[int]
+ """The index of the file in the list of files."""
+
+ type: Required[Literal["file_citation"]]
+ """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(TypedDict, total=False):
+ end_index: Required[int]
+ """The index of the last character of the URL citation in the message."""
+
+ start_index: Required[int]
+ """The index of the first character of the URL citation in the message."""
+
+ title: Required[str]
+ """The title of the web resource."""
+
+ type: Required[Literal["url_citation"]]
+ """The type of the URL citation. Always `url_citation`."""
+
+ url: Required[str]
+ """The URL of the web resource."""
+
+
+class AnnotationFilePath(TypedDict, total=False):
+ file_id: Required[str]
+ """The ID of the file."""
+
+ index: Required[int]
+ """The index of the file in the list of files."""
+
+ type: Required[Literal["file_path"]]
+ """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath]
+
+
+class ResponseOutputTextParam(TypedDict, total=False):
+ annotations: Required[Iterable[Annotation]]
+ """The annotations of the text output."""
+
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py
new file mode 100644
index 00000000..57e5fbfe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningItem", "Summary"]
+
+
+class Summary(BaseModel):
+ text: str
+ """
+ A short summary of the reasoning used by the model when generating the response.
+ """
+
+ type: Literal["summary_text"]
+ """The type of the object. Always `summary_text`."""
+
+
+class ResponseReasoningItem(BaseModel):
+ id: str
+ """The unique identifier of the reasoning content."""
+
+ summary: List[Summary]
+ """Reasoning text contents."""
+
+ type: Literal["reasoning"]
+ """The type of the object. Always `reasoning`."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py
new file mode 100644
index 00000000..adb49d64
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseReasoningItemParam", "Summary"]
+
+
+class Summary(TypedDict, total=False):
+ text: Required[str]
+ """
+ A short summary of the reasoning used by the model when generating the response.
+ """
+
+ type: Required[Literal["summary_text"]]
+ """The type of the object. Always `summary_text`."""
+
+
+class ResponseReasoningItemParam(TypedDict, total=False):
+ id: Required[str]
+ """The unique identifier of the reasoning content."""
+
+ summary: Required[Iterable[Summary]]
+ """Reasoning text contents."""
+
+ type: Required[Literal["reasoning"]]
+ """The type of the object. Always `reasoning`."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py
new file mode 100644
index 00000000..04dcdf1c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseRefusalDeltaEvent"]
+
+
+class ResponseRefusalDeltaEvent(BaseModel):
+ content_index: int
+ """The index of the content part that the refusal text is added to."""
+
+ delta: str
+ """The refusal text that is added."""
+
+ item_id: str
+ """The ID of the output item that the refusal text is added to."""
+
+ output_index: int
+ """The index of the output item that the refusal text is added to."""
+
+ type: Literal["response.refusal.delta"]
+ """The type of the event. Always `response.refusal.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py
new file mode 100644
index 00000000..a9b6f4b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseRefusalDoneEvent"]
+
+
+class ResponseRefusalDoneEvent(BaseModel):
+ content_index: int
+ """The index of the content part that the refusal text is finalized."""
+
+ item_id: str
+ """The ID of the output item that the refusal text is finalized."""
+
+ output_index: int
+ """The index of the output item that the refusal text is finalized."""
+
+ refusal: str
+ """The refusal text that is finalized."""
+
+ type: Literal["response.refusal.done"]
+ """The type of the event. Always `response.refusal.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py
new file mode 100644
index 00000000..137bf4dc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import TypedDict
+
+from .response_includable import ResponseIncludable
+
+__all__ = ["ResponseRetrieveParams"]
+
+
+class ResponseRetrieveParams(TypedDict, total=False):
+ include: List[ResponseIncludable]
+ """Additional fields to include in the response.
+
+ See the `include` parameter for Response creation above for more information.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_status.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_status.py
new file mode 100644
index 00000000..934d17cd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_status.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ResponseStatus"]
+
+ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "incomplete"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py
new file mode 100644
index 00000000..446863b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_error_event import ResponseErrorEvent
+from .response_failed_event import ResponseFailedEvent
+from .response_created_event import ResponseCreatedEvent
+from .response_completed_event import ResponseCompletedEvent
+from .response_text_done_event import ResponseTextDoneEvent
+from .response_audio_done_event import ResponseAudioDoneEvent
+from .response_incomplete_event import ResponseIncompleteEvent
+from .response_text_delta_event import ResponseTextDeltaEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent
+from .response_in_progress_event import ResponseInProgressEvent
+from .response_refusal_done_event import ResponseRefusalDoneEvent
+from .response_refusal_delta_event import ResponseRefusalDeltaEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
+from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent
+from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
+from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent
+from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
+from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
+from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
+from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
+from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
+from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
+from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent
+from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent
+from .response_code_interpreter_call_code_delta_event import ResponseCodeInterpreterCallCodeDeltaEvent
+from .response_code_interpreter_call_in_progress_event import ResponseCodeInterpreterCallInProgressEvent
+from .response_code_interpreter_call_interpreting_event import ResponseCodeInterpreterCallInterpretingEvent
+
+__all__ = ["ResponseStreamEvent"]
+
+ResponseStreamEvent: TypeAlias = Annotated[
+ Union[
+ ResponseAudioDeltaEvent,
+ ResponseAudioDoneEvent,
+ ResponseAudioTranscriptDeltaEvent,
+ ResponseAudioTranscriptDoneEvent,
+ ResponseCodeInterpreterCallCodeDeltaEvent,
+ ResponseCodeInterpreterCallCodeDoneEvent,
+ ResponseCodeInterpreterCallCompletedEvent,
+ ResponseCodeInterpreterCallInProgressEvent,
+ ResponseCodeInterpreterCallInterpretingEvent,
+ ResponseCompletedEvent,
+ ResponseContentPartAddedEvent,
+ ResponseContentPartDoneEvent,
+ ResponseCreatedEvent,
+ ResponseErrorEvent,
+ ResponseFileSearchCallCompletedEvent,
+ ResponseFileSearchCallInProgressEvent,
+ ResponseFileSearchCallSearchingEvent,
+ ResponseFunctionCallArgumentsDeltaEvent,
+ ResponseFunctionCallArgumentsDoneEvent,
+ ResponseInProgressEvent,
+ ResponseFailedEvent,
+ ResponseIncompleteEvent,
+ ResponseOutputItemAddedEvent,
+ ResponseOutputItemDoneEvent,
+ ResponseRefusalDeltaEvent,
+ ResponseRefusalDoneEvent,
+ ResponseTextAnnotationDeltaEvent,
+ ResponseTextDeltaEvent,
+ ResponseTextDoneEvent,
+ ResponseWebSearchCallCompletedEvent,
+ ResponseWebSearchCallInProgressEvent,
+ ResponseWebSearchCallSearchingEvent,
+ ],
+ PropertyInfo(discriminator="type"),
+]
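Because every member carries a distinct `type` literal, a streaming consumer can dispatch on that string without isinstance checks. A minimal sketch, assuming the Responses streaming surface yields these events when `stream=True`:

from openai import OpenAI

client = OpenAI()
stream = client.responses.create(model="gpt-4o", input="Say hello", stream=True)
for event in stream:
    # `event.type` discriminates the union defined above.
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
    elif event.type == "response.completed":
        print()  # the completed event carries the final response object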
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py
new file mode 100644
index 00000000..4f258228
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py
@@ -0,0 +1,79 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+ "ResponseTextAnnotationDeltaEvent",
+ "Annotation",
+ "AnnotationFileCitation",
+ "AnnotationURLCitation",
+ "AnnotationFilePath",
+]
+
+
+class AnnotationFileCitation(BaseModel):
+ file_id: str
+ """The ID of the file."""
+
+ index: int
+ """The index of the file in the list of files."""
+
+ type: Literal["file_citation"]
+ """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(BaseModel):
+ end_index: int
+ """The index of the last character of the URL citation in the message."""
+
+ start_index: int
+ """The index of the first character of the URL citation in the message."""
+
+ title: str
+ """The title of the web resource."""
+
+ type: Literal["url_citation"]
+ """The type of the URL citation. Always `url_citation`."""
+
+ url: str
+ """The URL of the web resource."""
+
+
+class AnnotationFilePath(BaseModel):
+ file_id: str
+ """The ID of the file."""
+
+ index: int
+ """The index of the file in the list of files."""
+
+ type: Literal["file_path"]
+ """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Annotated[
+ Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+]
+
+
+class ResponseTextAnnotationDeltaEvent(BaseModel):
+ annotation: Annotation
+ """A citation to a file."""
+
+ annotation_index: int
+ """The index of the annotation that was added."""
+
+ content_index: int
+ """The index of the content part that the text annotation was added to."""
+
+ item_id: str
+ """The ID of the output item that the text annotation was added to."""
+
+ output_index: int
+ """The index of the output item that the text annotation was added to."""
+
+ type: Literal["response.output_text.annotation.added"]
+ """The type of the event. Always `response.output_text.annotation.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py
new file mode 100644
index 00000000..a1894a91
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .response_format_text_config import ResponseFormatTextConfig
+
+__all__ = ["ResponseTextConfig"]
+
+
+class ResponseTextConfig(BaseModel):
+ format: Optional[ResponseFormatTextConfig] = None
+ """An object specifying the format that the model must output.
+
+ Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ ensures the model will match your supplied JSON schema. Learn more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ The default format is `{ "type": "text" }` with no additional options.
+
+ **Not recommended for gpt-4o and newer models:**
+
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ ensures the message the model generates is valid JSON. Using `json_schema` is
+ preferred for models that support it.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py
new file mode 100644
index 00000000..aec064bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .response_format_text_config_param import ResponseFormatTextConfigParam
+
+__all__ = ["ResponseTextConfigParam"]
+
+
+class ResponseTextConfigParam(TypedDict, total=False):
+ format: ResponseFormatTextConfigParam
+ """An object specifying the format that the model must output.
+
+ Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ ensures the model will match your supplied JSON schema. Learn more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ The default format is `{ "type": "text" }` with no additional options.
+
+ **Not recommended for gpt-4o and newer models:**
+
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ ensures the message the model generates is valid JSON. Using `json_schema` is
+ preferred for models that support it.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py
new file mode 100644
index 00000000..751a5e2a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseTextDeltaEvent"]
+
+
+class ResponseTextDeltaEvent(BaseModel):
+ content_index: int
+ """The index of the content part that the text delta was added to."""
+
+ delta: str
+ """The text delta that was added."""
+
+ item_id: str
+ """The ID of the output item that the text delta was added to."""
+
+ output_index: int
+ """The index of the output item that the text delta was added to."""
+
+ type: Literal["response.output_text.delta"]
+ """The type of the event. Always `response.output_text.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py
new file mode 100644
index 00000000..9b5c5e02
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseTextDoneEvent"]
+
+
+class ResponseTextDoneEvent(BaseModel):
+ content_index: int
+ """The index of the content part that the text content is finalized."""
+
+ item_id: str
+ """The ID of the output item that the text content is finalized."""
+
+ output_index: int
+ """The index of the output item that the text content is finalized."""
+
+ text: str
+ """The text content that is finalized."""
+
+ type: Literal["response.output_text.done"]
+ """The type of the event. Always `response.output_text.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py
new file mode 100644
index 00000000..9ad36bd3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"]
+
+
+class InputTokensDetails(BaseModel):
+ cached_tokens: int
+ """The number of tokens that were retrieved from the cache.
+
+ [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+ """
+
+
+class OutputTokensDetails(BaseModel):
+ reasoning_tokens: int
+ """The number of reasoning tokens."""
+
+
+class ResponseUsage(BaseModel):
+ input_tokens: int
+ """The number of input tokens."""
+
+ input_tokens_details: InputTokensDetails
+ """A detailed breakdown of the input tokens."""
+
+ output_tokens: int
+ """The number of output tokens."""
+
+ output_tokens_details: OutputTokensDetails
+ """A detailed breakdown of the output tokens."""
+
+ total_tokens: int
+ """The total number of tokens used."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py
new file mode 100644
index 00000000..76f26766
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallCompletedEvent"]
+
+
+class ResponseWebSearchCallCompletedEvent(BaseModel):
+ item_id: str
+ """Unique ID for the output item associated with the web search call."""
+
+ output_index: int
+ """The index of the output item that the web search call is associated with."""
+
+ type: Literal["response.web_search_call.completed"]
+ """The type of the event. Always `response.web_search_call.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py
new file mode 100644
index 00000000..681ce6d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallInProgressEvent"]
+
+
+class ResponseWebSearchCallInProgressEvent(BaseModel):
+ item_id: str
+ """Unique ID for the output item associated with the web search call."""
+
+ output_index: int
+ """The index of the output item that the web search call is associated with."""
+
+ type: Literal["response.web_search_call.in_progress"]
+ """The type of the event. Always `response.web_search_call.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py
new file mode 100644
index 00000000..c885d989
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallSearchingEvent"]
+
+
+class ResponseWebSearchCallSearchingEvent(BaseModel):
+ item_id: str
+ """Unique ID for the output item associated with the web search call."""
+
+ output_index: int
+ """The index of the output item that the web search call is associated with."""
+
+ type: Literal["response.web_search_call.searching"]
+ """The type of the event. Always `response.web_search_call.searching`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool.py
new file mode 100644
index 00000000..de5d5524
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .computer_tool import ComputerTool
+from .function_tool import FunctionTool
+from .web_search_tool import WebSearchTool
+from .file_search_tool import FileSearchTool
+
+__all__ = ["Tool"]
+
+Tool: TypeAlias = Annotated[
+ Union[FileSearchTool, FunctionTool, ComputerTool, WebSearchTool], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py
new file mode 100644
index 00000000..8d2a4f28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceFunction"]
+
+
+class ToolChoiceFunction(BaseModel):
+ name: str
+ """The name of the function to call."""
+
+ type: Literal["function"]
+ """For function calling, the type is always `function`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py
new file mode 100644
index 00000000..910537fd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceFunctionParam"]
+
+
+class ToolChoiceFunctionParam(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
+
+ type: Required[Literal["function"]]
+ """For function calling, the type is always `function`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py
new file mode 100644
index 00000000..c200db54
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ToolChoiceOptions"]
+
+ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py
new file mode 100644
index 00000000..4942808f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceTypes"]
+
+
+class ToolChoiceTypes(BaseModel):
+ type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+ """The type of hosted tool the model should to use.
+
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+
+ Allowed values are:
+
+ - `file_search`
+ - `web_search_preview`
+ - `computer_use_preview`
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py
new file mode 100644
index 00000000..b14f2a9e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceTypesParam"]
+
+
+class ToolChoiceTypesParam(TypedDict, total=False):
+ type: Required[
+ Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+ ]
+ """The type of hosted tool the model should to use.
+
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+
+ Allowed values are:
+
+ - `file_search`
+ - `web_search_preview`
+ - `computer_use_preview`
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py
new file mode 100644
index 00000000..be1cf824
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .computer_tool_param import ComputerToolParam
+from .function_tool_param import FunctionToolParam
+from .web_search_tool_param import WebSearchToolParam
+from .file_search_tool_param import FileSearchToolParam
+from ..chat.chat_completion_tool_param import ChatCompletionToolParam
+
+__all__ = ["ToolParam"]
+
+ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, ComputerToolParam, WebSearchToolParam]
+
+ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py
new file mode 100644
index 00000000..bee270bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["WebSearchTool", "UserLocation"]
+
+
+class UserLocation(BaseModel):
+ type: Literal["approximate"]
+ """The type of location approximation. Always `approximate`."""
+
+ city: Optional[str] = None
+ """Free text input for the city of the user, e.g. `San Francisco`."""
+
+ country: Optional[str] = None
+ """
+ The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+ the user, e.g. `US`.
+ """
+
+ region: Optional[str] = None
+ """Free text input for the region of the user, e.g. `California`."""
+
+ timezone: Optional[str] = None
+ """
+ The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+ user, e.g. `America/Los_Angeles`.
+ """
+
+
+class WebSearchTool(BaseModel):
+ type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
+ """The type of the web search tool. One of:
+
+ - `web_search_preview`
+ - `web_search_preview_2025_03_11`
+ """
+
+ search_context_size: Optional[Literal["low", "medium", "high"]] = None
+ """
+ High-level guidance for the amount of context window space to use for the
+ search. One of `low`, `medium`, or `high`. `medium` is the default.
+ """
+
+ user_location: Optional[UserLocation] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py
new file mode 100644
index 00000000..8ee36ffb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["WebSearchToolParam", "UserLocation"]
+
+
+class UserLocation(TypedDict, total=False):
+ type: Required[Literal["approximate"]]
+ """The type of location approximation. Always `approximate`."""
+
+ city: str
+ """Free text input for the city of the user, e.g. `San Francisco`."""
+
+ country: str
+ """
+ The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+ the user, e.g. `US`.
+ """
+
+ region: str
+ """Free text input for the region of the user, e.g. `California`."""
+
+ timezone: str
+ """
+ The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+ user, e.g. `America/Los_Angeles`.
+ """
+
+
+class WebSearchToolParam(TypedDict, total=False):
+ type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
+ """The type of the web search tool. One of:
+
+ - `web_search_preview`
+ - `web_search_preview_2025_03_11`
+ """
+
+ search_context_size: Literal["low", "medium", "high"]
+ """
+ High-level guidance for the amount of context window space to use for the
+ search. One of `low`, `medium`, or `high`. `medium` is the default.
+ """
+
+ user_location: Optional[UserLocation]
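As a TypedDict, the param side is written as a plain dict. A sketch of wiring it into a request; the location values are illustrative:

from openai import OpenAI
from openai.types.responses.web_search_tool_param import WebSearchToolParam

client = OpenAI()
web_search: WebSearchToolParam = {
    "type": "web_search_preview",
    "search_context_size": "low",
    "user_location": {
        "type": "approximate",
        "city": "San Francisco",  # free-text, per the docstrings above
        "country": "US",
        "timezone": "America/Los_Angeles",
    },
}
response = client.responses.create(
    model="gpt-4o", input="What changed in town today?", tools=[web_search]
)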
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/shared/__init__.py
new file mode 100644
index 00000000..6ad0ed5e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/__init__.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .metadata import Metadata as Metadata
+from .reasoning import Reasoning as Reasoning
+from .all_models import AllModels as AllModels
+from .chat_model import ChatModel as ChatModel
+from .error_object import ErrorObject as ErrorObject
+from .compound_filter import CompoundFilter as CompoundFilter
+from .responses_model import ResponsesModel as ResponsesModel
+from .reasoning_effort import ReasoningEffort as ReasoningEffort
+from .comparison_filter import ComparisonFilter as ComparisonFilter
+from .function_definition import FunctionDefinition as FunctionDefinition
+from .function_parameters import FunctionParameters as FunctionParameters
+from .response_format_text import ResponseFormatText as ResponseFormatText
+from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
+from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/all_models.py b/.venv/lib/python3.12/site-packages/openai/types/shared/all_models.py
new file mode 100644
index 00000000..db841077
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/all_models.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .chat_model import ChatModel
+
+__all__ = ["AllModels"]
+
+AllModels: TypeAlias = Union[
+ str, ChatModel, Literal["o1-pro", "o1-pro-2025-03-19", "computer-use-preview", "computer-use-preview-2025-03-11"]
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py
new file mode 100644
index 00000000..b1937572
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatModel"]
+
+ChatModel: TypeAlias = Literal[
+ "o3-mini",
+ "o3-mini-2025-01-31",
+ "o1",
+ "o1-2024-12-17",
+ "o1-preview",
+ "o1-preview-2024-09-12",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o",
+ "gpt-4o-2024-11-20",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-audio-preview",
+ "gpt-4o-audio-preview-2024-10-01",
+ "gpt-4o-audio-preview-2024-12-17",
+ "gpt-4o-mini-audio-preview",
+ "gpt-4o-mini-audio-preview-2024-12-17",
+ "gpt-4o-search-preview",
+ "gpt-4o-mini-search-preview",
+ "gpt-4o-search-preview-2025-03-11",
+ "gpt-4o-mini-search-preview-2025-03-11",
+ "chatgpt-4o-latest",
+ "gpt-4o-mini",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-0125-preview",
+ "gpt-4-turbo-preview",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-16k-0613",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py
new file mode 100644
index 00000000..2ec2651f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComparisonFilter"]
+
+
+class ComparisonFilter(BaseModel):
+ key: str
+ """The key to compare against the value."""
+
+ type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
+ """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+
+ - `eq`: equals
+ - `ne`: not equal
+ - `gt`: greater than
+ - `gte`: greater than or equal
+ - `lt`: less than
+ - `lte`: less than or equal
+ """
+
+ value: Union[str, float, bool]
+ """
+ The value to compare against the attribute key; supports string, number, or
+ boolean types.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py
new file mode 100644
index 00000000..3aefa436
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from .comparison_filter import ComparisonFilter
+
+__all__ = ["CompoundFilter", "Filter"]
+
+Filter: TypeAlias = Union[ComparisonFilter, object]
+
+
+class CompoundFilter(BaseModel):
+ filters: List[Filter]
+ """Array of filters to combine.
+
+ Items can be `ComparisonFilter` or `CompoundFilter`.
+ """
+
+ type: Literal["and", "or"]
+ """Type of operation: `and` or `or`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/error_object.py b/.venv/lib/python3.12/site-packages/openai/types/shared/error_object.py
new file mode 100644
index 00000000..32d7045e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/error_object.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ErrorObject"]
+
+
+class ErrorObject(BaseModel):
+ code: Optional[str] = None
+
+ message: str
+
+ param: Optional[str] = None
+
+ type: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py b/.venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py
new file mode 100644
index 00000000..06baa231
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .function_parameters import FunctionParameters
+
+__all__ = ["FunctionDefinition"]
+
+
+class FunctionDefinition(BaseModel):
+ name: str
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: Optional[str] = None
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ parameters: Optional[FunctionParameters] = None
+ """The parameters the functions accepts, described as a JSON Schema object.
+
+ See the [guide](https://platform.openai.com/docs/guides/function-calling) for
+ examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+ strict: Optional[bool] = None
+ """Whether to enable strict schema adherence when generating the function call.
+
+ If set to true, the model will follow the exact schema defined in the
+ `parameters` field. Only a subset of JSON Schema is supported when `strict` is
+ `true`. Learn more about Structured Outputs in the
+ [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py b/.venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py
new file mode 100644
index 00000000..a3d83e34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["FunctionParameters"]
+
+FunctionParameters: TypeAlias = Dict[str, object]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/metadata.py b/.venv/lib/python3.12/site-packages/openai/types/shared/metadata.py
new file mode 100644
index 00000000..0da88c67
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/metadata.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["Metadata"]
+
+Metadata: TypeAlias = Dict[str, str]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py
new file mode 100644
index 00000000..78a396d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .reasoning_effort import ReasoningEffort
+
+__all__ = ["Reasoning"]
+
+
+class Reasoning(BaseModel):
+ effort: Optional[ReasoningEffort] = None
+ """**o-series models only**
+
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ result in faster responses and fewer tokens used on reasoning in a response.
+ """
+
+ generate_summary: Optional[Literal["concise", "detailed"]] = None
+ """**computer_use_preview only**
+
+ A summary of the reasoning performed by the model. This can be useful for
+ debugging and understanding the model's reasoning process. One of `concise` or
+ `detailed`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py
new file mode 100644
index 00000000..ace21b67
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ReasoningEffort"]
+
+ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py
new file mode 100644
index 00000000..2aaa5dbd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatJSONObject"]
+
+
+class ResponseFormatJSONObject(BaseModel):
+ type: Literal["json_object"]
+ """The type of response format being defined. Always `json_object`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py
new file mode 100644
index 00000000..c7924446
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatJSONSchema", "JSONSchema"]
+
+
+class JSONSchema(BaseModel):
+ name: str
+ """The name of the response format.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: Optional[str] = None
+ """
+ A description of what the response format is for, used by the model to determine
+ how to respond in the format.
+ """
+
+ schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
+ """
+ The schema for the response format, described as a JSON Schema object. Learn how
+ to build JSON schemas [here](https://json-schema.org/).
+ """
+
+ strict: Optional[bool] = None
+ """
+ Whether to enable strict schema adherence when generating the output. If set to
+ true, the model will always follow the exact schema defined in the `schema`
+ field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+ learn more, read the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ """
+
+
+class ResponseFormatJSONSchema(BaseModel):
+ json_schema: JSONSchema
+ """Structured Outputs configuration options, including a JSON Schema."""
+
+ type: Literal["json_schema"]
+ """The type of response format being defined. Always `json_schema`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py
new file mode 100644
index 00000000..f0c8cfb7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatText"]
+
+
+class ResponseFormatText(BaseModel):
+ type: Literal["text"]
+ """The type of response format being defined. Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py
new file mode 100644
index 00000000..85f154fd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .chat_model import ChatModel
+
+__all__ = ["ResponsesModel"]
+
+ResponsesModel: TypeAlias = Union[
+ str, ChatModel, Literal["o1-pro", "o1-pro-2025-03-19", "computer-use-preview", "computer-use-preview-2025-03-11"]
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py
new file mode 100644
index 00000000..88947108
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .metadata import Metadata as Metadata
+from .reasoning import Reasoning as Reasoning
+from .chat_model import ChatModel as ChatModel
+from .compound_filter import CompoundFilter as CompoundFilter
+from .responses_model import ResponsesModel as ResponsesModel
+from .reasoning_effort import ReasoningEffort as ReasoningEffort
+from .comparison_filter import ComparisonFilter as ComparisonFilter
+from .function_definition import FunctionDefinition as FunctionDefinition
+from .function_parameters import FunctionParameters as FunctionParameters
+from .response_format_text import ResponseFormatText as ResponseFormatText
+from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
+from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py
new file mode 100644
index 00000000..ff81b07a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py
@@ -0,0 +1,53 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatModel"]
+
+ChatModel: TypeAlias = Literal[
+ "o3-mini",
+ "o3-mini-2025-01-31",
+ "o1",
+ "o1-2024-12-17",
+ "o1-preview",
+ "o1-preview-2024-09-12",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o",
+ "gpt-4o-2024-11-20",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-audio-preview",
+ "gpt-4o-audio-preview-2024-10-01",
+ "gpt-4o-audio-preview-2024-12-17",
+ "gpt-4o-mini-audio-preview",
+ "gpt-4o-mini-audio-preview-2024-12-17",
+ "gpt-4o-search-preview",
+ "gpt-4o-mini-search-preview",
+ "gpt-4o-search-preview-2025-03-11",
+ "gpt-4o-mini-search-preview-2025-03-11",
+ "chatgpt-4o-latest",
+ "gpt-4o-mini",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-0125-preview",
+ "gpt-4-turbo-preview",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-16k-0613",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py
new file mode 100644
index 00000000..38edd315
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ComparisonFilter"]
+
+
+class ComparisonFilter(TypedDict, total=False):
+ key: Required[str]
+ """The key to compare against the value."""
+
+ type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
+ """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+
+ - `eq`: equals
+ - `ne`: not equal
+ - `gt`: greater than
+ - `gte`: greater than or equal
+ - `lt`: less than
+ - `lte`: less than or equal
+ """
+
+ value: Required[Union[str, float, bool]]
+ """
+ The value to compare against the attribute key; supports string, number, or
+ boolean types.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py
new file mode 100644
index 00000000..d12e9b1b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .comparison_filter import ComparisonFilter
+
+__all__ = ["CompoundFilter", "Filter"]
+
+Filter: TypeAlias = Union[ComparisonFilter, object]
+
+
+class CompoundFilter(TypedDict, total=False):
+ filters: Required[Iterable[Filter]]
+ """Array of filters to combine.
+
+ Items can be `ComparisonFilter` or `CompoundFilter`.
+ """
+
+ type: Required[Literal["and", "or"]]
+ """Type of operation: `and` or `or`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py
new file mode 100644
index 00000000..d45ec13f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from .function_parameters import FunctionParameters
+
+__all__ = ["FunctionDefinition"]
+
+
+class FunctionDefinition(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: str
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ parameters: FunctionParameters
+ """The parameters the functions accepts, described as a JSON Schema object.
+
+ See the [guide](https://platform.openai.com/docs/guides/function-calling) for
+ examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+ strict: Optional[bool]
+ """Whether to enable strict schema adherence when generating the function call.
+
+ If set to true, the model will follow the exact schema defined in the
+ `parameters` field. Only a subset of JSON Schema is supported when `strict` is
+ `true`. Learn more about Structured Outputs in the
+ [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py
new file mode 100644
index 00000000..45fc742d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["FunctionParameters"]
+
+FunctionParameters: TypeAlias = Dict[str, object]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py
new file mode 100644
index 00000000..821650b4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["Metadata"]
+
+Metadata: TypeAlias = Dict[str, str]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py
new file mode 100644
index 00000000..2953b895
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypedDict
+
+from ..shared.reasoning_effort import ReasoningEffort
+
+__all__ = ["Reasoning"]
+
+
+class Reasoning(TypedDict, total=False):
+ effort: Optional[ReasoningEffort]
+ """**o-series models only**
+
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ result in faster responses and fewer tokens used on reasoning in a response.
+ """
+
+ generate_summary: Optional[Literal["concise", "detailed"]]
+ """**computer_use_preview only**
+
+ A summary of the reasoning performed by the model. This can be useful for
+ debugging and understanding the model's reasoning process. One of `concise` or
+ `detailed`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py
new file mode 100644
index 00000000..6052c5ae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ReasoningEffort"]
+
+ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py
new file mode 100644
index 00000000..d4d1deaa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatJSONObject"]
+
+
+class ResponseFormatJSONObject(TypedDict, total=False):
+ type: Required[Literal["json_object"]]
+ """The type of response format being defined. Always `json_object`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py
new file mode 100644
index 00000000..5b0a13ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatJSONSchema", "JSONSchema"]
+
+
+class JSONSchema(TypedDict, total=False):
+ name: Required[str]
+ """The name of the response format.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: str
+ """
+ A description of what the response format is for, used by the model to determine
+ how to respond in the format.
+ """
+
+ schema: Dict[str, object]
+ """
+ The schema for the response format, described as a JSON Schema object. Learn how
+ to build JSON schemas [here](https://json-schema.org/).
+ """
+
+ strict: Optional[bool]
+ """
+ Whether to enable strict schema adherence when generating the output. If set to
+ true, the model will always follow the exact schema defined in the `schema`
+ field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+ learn more, read the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ """
+
+
+class ResponseFormatJSONSchema(TypedDict, total=False):
+ json_schema: Required[JSONSchema]
+ """Structured Outputs configuration options, including a JSON Schema."""
+
+ type: Required[Literal["json_schema"]]
+ """The type of response format being defined. Always `json_schema`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py
new file mode 100644
index 00000000..c3ef2b08
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatText"]
+
+
+class ResponseFormatText(TypedDict, total=False):
+ type: Required[Literal["text"]]
+ """The type of response format being defined. Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py
new file mode 100644
index 00000000..3bf0e137
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from ..shared.chat_model import ChatModel
+
+__all__ = ["ResponsesModel"]
+
+ResponsesModel: TypeAlias = Union[
+ str, ChatModel, Literal["o1-pro", "o1-pro-2025-03-19", "computer-use-preview", "computer-use-preview-2025-03-11"]
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py
new file mode 100644
index 00000000..2813bc66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["StaticFileChunkingStrategy"]
+
+
+class StaticFileChunkingStrategy(BaseModel):
+ chunk_overlap_tokens: int
+ """The number of tokens that overlap between chunks. The default value is `400`.
+
+ Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+ """
+
+ max_chunk_size_tokens: int
+ """The maximum number of tokens in each chunk.
+
+ The default value is `800`. The minimum value is `100` and the maximum value is
+ `4096`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py
new file mode 100644
index 00000000..2a95dce5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .static_file_chunking_strategy import StaticFileChunkingStrategy
+
+__all__ = ["StaticFileChunkingStrategyObject"]
+
+
+class StaticFileChunkingStrategyObject(BaseModel):
+ static: StaticFileChunkingStrategy
+
+ type: Literal["static"]
+ """Always `static`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py
new file mode 100644
index 00000000..0cdf35c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam
+
+__all__ = ["StaticFileChunkingStrategyObjectParam"]
+
+
+class StaticFileChunkingStrategyObjectParam(TypedDict, total=False):
+ static: Required[StaticFileChunkingStrategyParam]
+
+ type: Required[Literal["static"]]
+ """Always `static`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py
new file mode 100644
index 00000000..f917ac56
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["StaticFileChunkingStrategyParam"]
+
+
+class StaticFileChunkingStrategyParam(TypedDict, total=False):
+ chunk_overlap_tokens: Required[int]
+ """The number of tokens that overlap between chunks. The default value is `400`.
+
+ Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+ """
+
+ max_chunk_size_tokens: Required[int]
+ """The maximum number of tokens in each chunk.
+
+ The default value is `800`. The minimum value is `100` and the maximum value is
+ `4096`.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/upload.py b/.venv/lib/python3.12/site-packages/openai/types/upload.py
new file mode 100644
index 00000000..914b69a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/upload.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .file_object import FileObject
+
+__all__ = ["Upload"]
+
+
+class Upload(BaseModel):
+ id: str
+ """The Upload unique identifier, which can be referenced in API endpoints."""
+
+ bytes: int
+ """The intended number of bytes to be uploaded."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the Upload was created."""
+
+ expires_at: int
+ """The Unix timestamp (in seconds) for when the Upload will expire."""
+
+ filename: str
+ """The name of the file to be uploaded."""
+
+ object: Literal["upload"]
+ """The object type, which is always "upload"."""
+
+ purpose: str
+ """The intended purpose of the file.
+
+ [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose)
+ for acceptable values.
+ """
+
+ status: Literal["pending", "completed", "cancelled", "expired"]
+ """The status of the Upload."""
+
+ file: Optional[FileObject] = None
+ """The `File` object represents a document that has been uploaded to OpenAI."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py b/.venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py
new file mode 100644
index 00000000..cce568d5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+__all__ = ["UploadCompleteParams"]
+
+
+class UploadCompleteParams(TypedDict, total=False):
+ part_ids: Required[List[str]]
+ """The ordered list of Part IDs."""
+
+ md5: str
+ """
+ The optional md5 checksum for the file contents to verify if the bytes uploaded
+ match what you expect.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/upload_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/upload_create_params.py
new file mode 100644
index 00000000..2ebabe6c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/upload_create_params.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .file_purpose import FilePurpose
+
+__all__ = ["UploadCreateParams"]
+
+
+class UploadCreateParams(TypedDict, total=False):
+ bytes: Required[int]
+ """The number of bytes in the file you are uploading."""
+
+ filename: Required[str]
+ """The name of the file to upload."""
+
+ mime_type: Required[str]
+ """The MIME type of the file.
+
+ This must fall within the supported MIME types for your file purpose. See the
+ supported MIME types for assistants and vision.
+ """
+
+ purpose: Required[FilePurpose]
+ """The intended purpose of the uploaded file.
+
+ See the
+ [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py
new file mode 100644
index 00000000..41deb0ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .upload_part import UploadPart as UploadPart
+from .part_create_params import PartCreateParams as PartCreateParams
diff --git a/.venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py
new file mode 100644
index 00000000..9851ca41
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ..._types import FileTypes
+
+__all__ = ["PartCreateParams"]
+
+
+class PartCreateParams(TypedDict, total=False):
+ data: Required[FileTypes]
+ """The chunk of bytes for this Part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py b/.venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py
new file mode 100644
index 00000000..e09621d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["UploadPart"]
+
+
+class UploadPart(BaseModel):
+ id: str
+ """The upload Part unique identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the Part was created."""
+
+ object: Literal["upload.part"]
+ """The object type, which is always `upload.part`."""
+
+ upload_id: str
+ """The ID of the Upload object that this Part was added to."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store.py
new file mode 100644
index 00000000..2473a442
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store.py
@@ -0,0 +1,82 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .shared.metadata import Metadata
+
+__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
+
+
+class FileCounts(BaseModel):
+ cancelled: int
+ """The number of files that were cancelled."""
+
+ completed: int
+ """The number of files that have been successfully processed."""
+
+ failed: int
+ """The number of files that have failed to process."""
+
+ in_progress: int
+ """The number of files that are currently being processed."""
+
+ total: int
+ """The total number of files."""
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Literal["last_active_at"]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: int
+ """The number of days after the anchor time that the vector store will expire."""
+
+
+class VectorStore(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the vector store was created."""
+
+ file_counts: FileCounts
+
+ last_active_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the vector store was last active."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ name: str
+ """The name of the vector store."""
+
+ object: Literal["vector_store"]
+ """The object type, which is always `vector_store`."""
+
+ status: Literal["expired", "in_progress", "completed"]
+ """
+ The status of the vector store, which can be either `expired`, `in_progress`, or
+ `completed`. A status of `completed` indicates that the vector store is ready
+ for use.
+ """
+
+ usage_bytes: int
+ """The total number of bytes used by the files in the vector store."""
+
+ expires_after: Optional[ExpiresAfter] = None
+ """The expiration policy for a vector store."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the vector store will expire."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py
new file mode 100644
index 00000000..365d0936
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .shared_params.metadata import Metadata
+from .file_chunking_strategy_param import FileChunkingStrategyParam
+
+__all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
+
+
+class VectorStoreCreateParams(TypedDict, total=False):
+ chunking_strategy: FileChunkingStrategyParam
+ """The chunking strategy used to chunk the file(s).
+
+ If not set, will use the `auto` strategy. Only applicable if `file_ids` is
+ non-empty.
+ """
+
+ expires_after: ExpiresAfter
+ """The expiration policy for a vector store."""
+
+ file_ids: List[str]
+ """
+ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ name: str
+ """The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py
new file mode 100644
index 00000000..dfac9ce8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VectorStoreDeleted"]
+
+
+class VectorStoreDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["vector_store.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py
new file mode 100644
index 00000000..e26ff90a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["VectorStoreListParams"]
+
+
+class VectorStoreListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py
new file mode 100644
index 00000000..17573d0f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .shared_params.compound_filter import CompoundFilter
+from .shared_params.comparison_filter import ComparisonFilter
+
+__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"]
+
+
+class VectorStoreSearchParams(TypedDict, total=False):
+ query: Required[Union[str, List[str]]]
+ """A query string for a search"""
+
+ filters: Filters
+ """A filter to apply based on file attributes."""
+
+ max_num_results: int
+ """The maximum number of results to return.
+
+ This number should be between 1 and 50 inclusive.
+ """
+
+ ranking_options: RankingOptions
+ """Ranking options for search."""
+
+ rewrite_query: bool
+ """Whether to rewrite the natural language query for vector search."""
+
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(TypedDict, total=False):
+ ranker: Literal["auto", "default-2024-11-15"]
+
+ score_threshold: float
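
A filtered search sketch, assuming the `client.vector_stores.search` surface; the `lang` attribute and the filter shape (which follows the shared `ComparisonFilter`) are illustrative:

    from openai import OpenAI

    client = OpenAI()

    results = client.vector_stores.search(
        "vs_abc123",  # illustrative vector store ID
        query="How do I rotate an API key?",
        filters={"type": "eq", "key": "lang", "value": "en"},
        max_num_results=10,  # 1..50 inclusive
        ranking_options={"ranker": "auto", "score_threshold": 0.5},
    )
    for hit in results:
        print(hit.filename, hit.score)
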
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py
new file mode 100644
index 00000000..d78b71bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VectorStoreSearchResponse", "Content"]
+
+
+class Content(BaseModel):
+ text: str
+ """The text content returned from search."""
+
+ type: Literal["text"]
+ """The type of content."""
+
+
+class VectorStoreSearchResponse(BaseModel):
+ attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ content: List[Content]
+ """Content chunks from the file."""
+
+ file_id: str
+ """The ID of the vector store file."""
+
+ filename: str
+ """The name of the vector store file."""
+
+ score: float
+ """The similarity score for the result."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py
new file mode 100644
index 00000000..4f6ac639
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .shared_params.metadata import Metadata
+
+__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
+
+
+class VectorStoreUpdateParams(TypedDict, total=False):
+ expires_after: Optional[ExpiresAfter]
+ """The expiration policy for a vector store."""
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ name: Optional[str]
+ """The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py
new file mode 100644
index 00000000..96ce3014
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .file_list_params import FileListParams as FileListParams
+from .vector_store_file import VectorStoreFile as VectorStoreFile
+from .file_create_params import FileCreateParams as FileCreateParams
+from .file_update_params import FileUpdateParams as FileUpdateParams
+from .file_content_response import FileContentResponse as FileContentResponse
+from .vector_store_file_batch import VectorStoreFileBatch as VectorStoreFileBatch
+from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams
+from .vector_store_file_deleted import VectorStoreFileDeleted as VectorStoreFileDeleted
+from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py
new file mode 100644
index 00000000..1a470f75
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Required, TypedDict
+
+from ..file_chunking_strategy_param import FileChunkingStrategyParam
+
+__all__ = ["FileBatchCreateParams"]
+
+
+class FileBatchCreateParams(TypedDict, total=False):
+ file_ids: Required[List[str]]
+ """
+ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+ """
+
+ attributes: Optional[Dict[str, Union[str, float, bool]]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ chunking_strategy: FileChunkingStrategyParam
+ """The chunking strategy used to chunk the file(s).
+
+ If not set, will use the `auto` strategy. Only applicable if `file_ids` is
+ non-empty.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py
new file mode 100644
index 00000000..2a0a6c6a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FileBatchListFilesParams"]
+
+
+class FileBatchListFilesParams(TypedDict, total=False):
+ vector_store_id: Required[str]
+
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ filter: Literal["in_progress", "completed", "failed", "cancelled"]
+ """Filter by file status.
+
+ One of `in_progress`, `completed`, `failed`, `cancelled`.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py
new file mode 100644
index 00000000..32db2f2c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FileContentResponse"]
+
+
+class FileContentResponse(BaseModel):
+ text: Optional[str] = None
+ """The text content"""
+
+ type: Optional[str] = None
+ """The content type (currently only `"text"`)"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py
new file mode 100644
index 00000000..5b898925
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Optional
+from typing_extensions import Required, TypedDict
+
+from ..file_chunking_strategy_param import FileChunkingStrategyParam
+
+__all__ = ["FileCreateParams"]
+
+
+class FileCreateParams(TypedDict, total=False):
+ file_id: Required[str]
+ """
+ A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+ vector store should use. Useful for tools like `file_search` that can access
+ files.
+ """
+
+ attributes: Optional[Dict[str, Union[str, float, bool]]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ chunking_strategy: FileChunkingStrategyParam
+ """The chunking strategy used to chunk the file(s).
+
+ If not set, will use the `auto` strategy.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py
new file mode 100644
index 00000000..867b5fb3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["FileListParams"]
+
+
+class FileListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ filter: Literal["in_progress", "completed", "failed", "cancelled"]
+ """Filter by file status.
+
+ One of `in_progress`, `completed`, `failed`, `cancelled`.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py
new file mode 100644
index 00000000..ebf540d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["FileUpdateParams"]
+
+
+class FileUpdateParams(TypedDict, total=False):
+ vector_store_id: Required[str]
+
+ attributes: Required[Optional[Dict[str, Union[str, float, bool]]]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py
new file mode 100644
index 00000000..b59a61df
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..file_chunking_strategy import FileChunkingStrategy
+
+__all__ = ["VectorStoreFile", "LastError"]
+
+
+class LastError(BaseModel):
+ code: Literal["server_error", "unsupported_file", "invalid_file"]
+ """One of `server_error` or `rate_limit_exceeded`."""
+
+ message: str
+ """A human-readable description of the error."""
+
+
+class VectorStoreFile(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the vector store file was created."""
+
+ last_error: Optional[LastError] = None
+ """The last error associated with this vector store file.
+
+ Will be `null` if there are no errors.
+ """
+
+ object: Literal["vector_store.file"]
+ """The object type, which is always `vector_store.file`."""
+
+ status: Literal["in_progress", "completed", "cancelled", "failed"]
+ """
+ The status of the vector store file, which can be either `in_progress`,
+ `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
+ vector store file is ready for use.
+ """
+
+ usage_bytes: int
+ """The total vector store usage in bytes.
+
+ Note that this may be different from the original file size.
+ """
+
+ vector_store_id: str
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ that the [File](https://platform.openai.com/docs/api-reference/files) is
+ attached to.
+ """
+
+ attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ chunking_strategy: Optional[FileChunkingStrategy] = None
+ """The strategy used to chunk the file."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py
new file mode 100644
index 00000000..57dbfbd8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["VectorStoreFileBatch", "FileCounts"]
+
+
+class FileCounts(BaseModel):
+ cancelled: int
+ """The number of files that where cancelled."""
+
+ completed: int
+ """The number of files that have been processed."""
+
+ failed: int
+ """The number of files that have failed to process."""
+
+ in_progress: int
+ """The number of files that are currently being processed."""
+
+ total: int
+ """The total number of files."""
+
+
+class VectorStoreFileBatch(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """
+ The Unix timestamp (in seconds) for when the vector store files batch was
+ created.
+ """
+
+ file_counts: FileCounts
+
+ object: Literal["vector_store.files_batch"]
+ """The object type, which is always `vector_store.file_batch`."""
+
+ status: Literal["in_progress", "completed", "cancelled", "failed"]
+ """
+ The status of the vector store files batch, which can be either `in_progress`,
+ `completed`, `cancelled` or `failed`.
+ """
+
+ vector_store_id: str
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ that the [File](https://platform.openai.com/docs/api-reference/files) is
+ attached to.
+ """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py
new file mode 100644
index 00000000..5c856f26
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["VectorStoreFileDeleted"]
+
+
+class VectorStoreFileDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["vector_store.file.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py b/.venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py
new file mode 100644
index 00000000..40fd24ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing_extensions import Sequence, TypedDict
+
+if TYPE_CHECKING:
+ from websockets import Subprotocol
+ from websockets.extensions import ClientExtensionFactory
+
+
+class WebsocketConnectionOptions(TypedDict, total=False):
+ """Websocket connection options copied from `websockets`.
+
+ For example: https://websockets.readthedocs.io/en/stable/reference/asyncio/client.html#websockets.asyncio.client.connect
+ """
+
+ extensions: Sequence[ClientExtensionFactory] | None
+ """List of supported extensions, in order in which they should be negotiated and run."""
+
+ subprotocols: Sequence[Subprotocol] | None
+ """List of supported subprotocols, in order of decreasing preference."""
+
+ compression: str | None
+ """The “permessage-deflate” extension is enabled by default. Set compression to None to disable it. See the [compression guide](https://websockets.readthedocs.io/en/stable/topics/compression.html) for details."""
+
+ # limits
+ max_size: int | None
+ """Maximum size of incoming messages in bytes. None disables the limit."""
+
+ max_queue: int | None | tuple[int | None, int | None]
+ """High-water mark of the buffer where frames are received. It defaults to 16 frames. The low-water mark defaults to max_queue // 4. You may pass a (high, low) tuple to set the high-water and low-water marks. If you want to disable flow control entirely, you may set it to None, although that’s a bad idea."""
+
+ write_limit: int | tuple[int, int | None]
+ """High-water mark of write buffer in bytes. It is passed to set_write_buffer_limits(). It defaults to 32 KiB. You may pass a (high, low) tuple to set the high-water and low-water marks."""