Diffstat (limited to '.venv/lib/python3.12/site-packages/openai')
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/__init__.py  366
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/__main__.py  3
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_base_client.py  2000
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_client.py  583
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_compat.py  231
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_constants.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_exceptions.py  156
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_extras/__init__.py  3
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_extras/_common.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_extras/numpy_proxy.py  37
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_extras/pandas_proxy.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_extras/sounddevice_proxy.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_files.py  123
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_legacy_response.py  488
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_models.py  844
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_module_client.py  106
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_qs.py  150
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_resource.py  43
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_response.py  848
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_streaming.py  410
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_types.py  219
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/__init__.py  60
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_logs.py  42
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_proxy.py  62
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_reflection.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_streams.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_sync.py  86
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_transform.py  402
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_typing.py  149
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_utils/_utils.py  430
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/_version.py  4
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/__init__.py  1
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py  1
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/_main.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/audio.py  108
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py  13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py  160
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/completions.py  173
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/files.py  80
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/image.py  139
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_api/models.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_cli.py  233
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_errors.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_models.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_progress.py  59
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py  1
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py  63
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py  164
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/cli/_utils.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/helpers/__init__.py  4
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/helpers/local_audio_player.py  165
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/helpers/microphone.py  100
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/.keep  4
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/__init__.py  2
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/_old_api.py  72
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/_parsing/__init__.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/_parsing/_completions.py  264
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/_parsing/_responses.py  168
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/_pydantic.py  155
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/_tools.py  66
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/_validators.py  809
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/azure.py  632
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/__init__.py  8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/_assistants.py  1038
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/_deltas.py  64
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/chat/__init__.py  27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_completions.py  755
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_events.py  123
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_types.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/responses/__init__.py  13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_events.py  106
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_responses.py  354
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_types.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/pagination.py  125
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/py.typed  0
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/__init__.py  201
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/audio/__init__.py  61
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/audio/audio.py  166
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/audio/speech.py  244
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/audio/transcriptions.py  682
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/audio/translations.py  372
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/batches.py  517
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/__init__.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/assistants.py  1004
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/beta.py  175
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/chat/__init__.py  11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/chat/chat.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/chat/completions.py  634
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/realtime/__init__.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/realtime/realtime.py  1066
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/realtime/sessions.py  383
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/realtime/transcription_sessions.py  277
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/threads/__init__.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/threads/messages.py  670
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/runs.py  2989
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/steps.py  381
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/beta/threads/threads.py  1875
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/chat/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/chat/chat.py  102
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/chat/completions/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py  2331
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py  212
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/completions.py  1148
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/embeddings.py  290
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/files.py  767
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/fine_tuning/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/fine_tuning/fine_tuning.py  102
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/checkpoints.py  199
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/jobs.py  761
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/images.py  600
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/models.py  306
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/moderations.py  200
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/responses/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/responses/input_items.py  223
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/responses/responses.py  1791
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/responses/responses.py.orig  1796
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/uploads/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/uploads/parts.py  210
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/uploads/uploads.py  714
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py  801
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py  933
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py  868
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/__init__.py  78
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/__init__.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py  113
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py  11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py  49
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py  49
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py  11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio_model.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/audio_response_format.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch.py  87
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_create_params.py  49
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_error.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_list_params.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/__init__.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant.py  134
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py  212
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py  294
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py  11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py  171
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py  3
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py  55
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py  54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py  96
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py  27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py  61
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py  41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py  62
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py  32
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py  31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py  67
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py  68
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py  36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py  36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py  34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py  11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py  87
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py  52
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py  91
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py  27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py  121
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py  122
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py  227
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py  222
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py  150
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py  242
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py  240
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py  100
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py  143
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py  160
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py  160
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread.py  63
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py  401
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py  185
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py  55
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py  46
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py  103
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py  55
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py  42
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py  34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py  245
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py  261
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py  52
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py  70
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py  44
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py  78
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py  38
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py  41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py  115
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py  56
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/__init__.py  71
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py  73
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py  70
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py  150
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py  41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py  79
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py  31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py  31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py  8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py  11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py  57
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py  25
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py  404
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py  5
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py  40
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/chat_model.py  8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion.py  37
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion_choice.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion_create_params.py  187
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/completion_usage.py  54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py  31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/embedding.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py  53
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/embedding_model.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py  13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_content.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_create_params.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_deleted.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_list_params.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_object.py  51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/file_purpose.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py  223
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py  32
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py  6
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py  236
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py  6
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py  51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_edit_params.py  62
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_generate_params.py  65
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/image_model.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/images_response.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/model.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/model_deleted.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation.py  186
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_model.py  9
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py  13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/__init__.py  155
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py  27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py  44
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py  77
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response.py  204
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py  52
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py  212
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py  208
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py  204
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_error.py  34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py  51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py  51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py  43
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py  41
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py  32
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py  11
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py  31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py  9
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py  131
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py  33
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py  134
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_item.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py  34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py  34
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py  64
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py  67
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py  36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py  36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_status.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py  78
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py  79
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py  27
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py  36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py  18
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py  48
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py  48
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/__init__.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/all_models.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py  51
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/error_object.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py  43
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py  8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/metadata.py  8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py  28
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py  8
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py  48
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py  53
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py  46
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py  12
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py  16
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py  22
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/upload.py  42
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/upload_create_params.py  31
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py  6
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py  14
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store.py  82
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py  54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py  40
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py  13
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py  21
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py  67
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py  54
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py  15
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py  36
-rw-r--r--  .venv/lib/python3.12/site-packages/openai/version.py  3
560 files changed, 58495 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/openai/__init__.py b/.venv/lib/python3.12/site-packages/openai/__init__.py
new file mode 100644
index 00000000..7ce6df08
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/__init__.py
@@ -0,0 +1,366 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os as _os
+from typing_extensions import override
+
+from . import types
+from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes
+from ._utils import file_from_path
+from ._client import Client, OpenAI, Stream, Timeout, Transport, AsyncClient, AsyncOpenAI, AsyncStream, RequestOptions
+from ._models import BaseModel
+from ._version import __title__, __version__
+from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse
+from ._constants import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES, DEFAULT_CONNECTION_LIMITS
+from ._exceptions import (
+    APIError,
+    OpenAIError,
+    ConflictError,
+    NotFoundError,
+    APIStatusError,
+    RateLimitError,
+    APITimeoutError,
+    BadRequestError,
+    APIConnectionError,
+    AuthenticationError,
+    InternalServerError,
+    PermissionDeniedError,
+    LengthFinishReasonError,
+    UnprocessableEntityError,
+    APIResponseValidationError,
+    ContentFilterFinishReasonError,
+)
+from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
+from ._utils._logs import setup_logging as _setup_logging
+from ._legacy_response import HttpxBinaryResponseContent as HttpxBinaryResponseContent
+
+__all__ = [
+    "types",
+    "__version__",
+    "__title__",
+    "NoneType",
+    "Transport",
+    "ProxiesTypes",
+    "NotGiven",
+    "NOT_GIVEN",
+    "Omit",
+    "OpenAIError",
+    "APIError",
+    "APIStatusError",
+    "APITimeoutError",
+    "APIConnectionError",
+    "APIResponseValidationError",
+    "BadRequestError",
+    "AuthenticationError",
+    "PermissionDeniedError",
+    "NotFoundError",
+    "ConflictError",
+    "UnprocessableEntityError",
+    "RateLimitError",
+    "InternalServerError",
+    "LengthFinishReasonError",
+    "ContentFilterFinishReasonError",
+    "Timeout",
+    "RequestOptions",
+    "Client",
+    "AsyncClient",
+    "Stream",
+    "AsyncStream",
+    "OpenAI",
+    "AsyncOpenAI",
+    "file_from_path",
+    "BaseModel",
+    "DEFAULT_TIMEOUT",
+    "DEFAULT_MAX_RETRIES",
+    "DEFAULT_CONNECTION_LIMITS",
+    "DefaultHttpxClient",
+    "DefaultAsyncHttpxClient",
+]
+
+from .lib import azure as _azure, pydantic_function_tool as pydantic_function_tool
+from .version import VERSION as VERSION
+from .lib.azure import AzureOpenAI as AzureOpenAI, AsyncAzureOpenAI as AsyncAzureOpenAI
+from .lib._old_api import *
+from .lib.streaming import (
+    AssistantEventHandler as AssistantEventHandler,
+    AsyncAssistantEventHandler as AsyncAssistantEventHandler,
+)
+
+_setup_logging()
+
+# Update the __module__ attribute for exported symbols so that
+# error messages point to this module instead of the module
+# it was originally defined in, e.g.
+# openai._exceptions.NotFoundError -> openai.NotFoundError
+__locals = locals()
+for __name in __all__:
+    if not __name.startswith("__"):
+        try:
+            __locals[__name].__module__ = "openai"
+        except (TypeError, AttributeError):
+            # Some of our exported symbols are builtins which we can't set attributes for.
+            pass
+
+# ------ Module level client ------
+import typing as _t
+import typing_extensions as _te
+
+import httpx as _httpx
+
+from ._base_client import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES
+
+api_key: str | None = None
+
+organization: str | None = None
+
+project: str | None = None
+
+base_url: str | _httpx.URL | None = None
+
+timeout: float | Timeout | None = DEFAULT_TIMEOUT
+
+max_retries: int = DEFAULT_MAX_RETRIES
+
+default_headers: _t.Mapping[str, str] | None = None
+
+default_query: _t.Mapping[str, object] | None = None
+
+http_client: _httpx.Client | None = None
+
+_ApiType = _te.Literal["openai", "azure"]
+
+api_type: _ApiType | None = _t.cast(_ApiType, _os.environ.get("OPENAI_API_TYPE"))
+
+api_version: str | None = _os.environ.get("OPENAI_API_VERSION")
+
+azure_endpoint: str | None = _os.environ.get("AZURE_OPENAI_ENDPOINT")
+
+azure_ad_token: str | None = _os.environ.get("AZURE_OPENAI_AD_TOKEN")
+
+azure_ad_token_provider: _azure.AzureADTokenProvider | None = None
+
+
+class _ModuleClient(OpenAI):
+    # Note: we have to use type: ignores here, as overriding class members
+    # with properties is technically unsafe, but it is fine for our use case.
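+    #
+    # Each property below proxies reads and writes to the corresponding
+    # module-level configuration global (api_key, organization, timeout, ...),
+    # so that module-level configuration such as `openai.api_key = "..."`
+    # is picked up by this client.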
+
+    @property  # type: ignore
+    @override
+    def api_key(self) -> str | None:
+        return api_key
+
+    @api_key.setter  # type: ignore
+    def api_key(self, value: str | None) -> None:  # type: ignore
+        global api_key
+
+        api_key = value
+
+    @property  # type: ignore
+    @override
+    def organization(self) -> str | None:
+        return organization
+
+    @organization.setter  # type: ignore
+    def organization(self, value: str | None) -> None:  # type: ignore
+        global organization
+
+        organization = value
+
+    @property  # type: ignore
+    @override
+    def project(self) -> str | None:
+        return project
+
+    @project.setter  # type: ignore
+    def project(self, value: str | None) -> None:  # type: ignore
+        global project
+
+        project = value
+
+    @property
+    @override
+    def base_url(self) -> _httpx.URL:
+        if base_url is not None:
+            return _httpx.URL(base_url)
+
+        return super().base_url
+
+    @base_url.setter
+    def base_url(self, url: _httpx.URL | str) -> None:
+        super().base_url = url  # type: ignore[misc]
+
+    @property  # type: ignore
+    @override
+    def timeout(self) -> float | Timeout | None:
+        return timeout
+
+    @timeout.setter  # type: ignore
+    def timeout(self, value: float | Timeout | None) -> None:  # type: ignore
+        global timeout
+
+        timeout = value
+
+    @property  # type: ignore
+    @override
+    def max_retries(self) -> int:
+        return max_retries
+
+    @max_retries.setter  # type: ignore
+    def max_retries(self, value: int) -> None:  # type: ignore
+        global max_retries
+
+        max_retries = value
+
+    @property  # type: ignore
+    @override
+    def _custom_headers(self) -> _t.Mapping[str, str] | None:
+        return default_headers
+
+    @_custom_headers.setter  # type: ignore
+    def _custom_headers(self, value: _t.Mapping[str, str] | None) -> None:  # type: ignore
+        global default_headers
+
+        default_headers = value
+
+    @property  # type: ignore
+    @override
+    def _custom_query(self) -> _t.Mapping[str, object] | None:
+        return default_query
+
+    @_custom_query.setter  # type: ignore
+    def _custom_query(self, value: _t.Mapping[str, object] | None) -> None:  # type: ignore
+        global default_query
+
+        default_query = value
+
+    @property  # type: ignore
+    @override
+    def _client(self) -> _httpx.Client:
+        return http_client or super()._client
+
+    @_client.setter  # type: ignore
+    def _client(self, value: _httpx.Client) -> None:  # type: ignore
+        global http_client
+
+        http_client = value
+
+
+class _AzureModuleClient(_ModuleClient, AzureOpenAI):  # type: ignore
+    ...
+
+
+class _AmbiguousModuleClientUsageError(OpenAIError):
+    def __init__(self) -> None:
+        super().__init__(
+            "Ambiguous use of module client; please set `openai.api_type` or the `OPENAI_API_TYPE` environment variable to `openai` or `azure`"
+        )
+
+
+def _has_openai_credentials() -> bool:
+    return _os.environ.get("OPENAI_API_KEY") is not None
+
+
+def _has_azure_credentials() -> bool:
+    return azure_endpoint is not None or _os.environ.get("AZURE_OPENAI_API_KEY") is not None
+
+
+def _has_azure_ad_credentials() -> bool:
+    return (
+        _os.environ.get("AZURE_OPENAI_AD_TOKEN") is not None
+        or azure_ad_token is not None
+        or azure_ad_token_provider is not None
+    )
+
+
+_client: OpenAI | None = None
+
+
+def _load_client() -> OpenAI:  # type: ignore[reportUnusedFunction]
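+    # Lazily construct a single module-level client on first use, choosing the
+    # plain OpenAI client or the Azure variant based on `api_type` and on which
+    # credentials / environment variables are configured.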
+    global _client
+
+    if _client is None:
+        global api_type, azure_endpoint, azure_ad_token, api_version
+
+        if azure_endpoint is None:
+            azure_endpoint = _os.environ.get("AZURE_OPENAI_ENDPOINT")
+
+        if azure_ad_token is None:
+            azure_ad_token = _os.environ.get("AZURE_OPENAI_AD_TOKEN")
+
+        if api_version is None:
+            api_version = _os.environ.get("OPENAI_API_VERSION")
+
+        if api_type is None:
+            has_openai = _has_openai_credentials()
+            has_azure = _has_azure_credentials()
+            has_azure_ad = _has_azure_ad_credentials()
+
+            if has_openai and (has_azure or has_azure_ad):
+                raise _AmbiguousModuleClientUsageError()
+
+            if (azure_ad_token is not None or azure_ad_token_provider is not None) and _os.environ.get(
+                "AZURE_OPENAI_API_KEY"
+            ) is not None:
+                raise _AmbiguousModuleClientUsageError()
+
+            if has_azure or has_azure_ad:
+                api_type = "azure"
+            else:
+                api_type = "openai"
+
+        if api_type == "azure":
+            _client = _AzureModuleClient(  # type: ignore
+                api_version=api_version,
+                azure_endpoint=azure_endpoint,
+                api_key=api_key,
+                azure_ad_token=azure_ad_token,
+                azure_ad_token_provider=azure_ad_token_provider,
+                organization=organization,
+                base_url=base_url,
+                timeout=timeout,
+                max_retries=max_retries,
+                default_headers=default_headers,
+                default_query=default_query,
+                http_client=http_client,
+            )
+            return _client
+
+        _client = _ModuleClient(
+            api_key=api_key,
+            organization=organization,
+            project=project,
+            base_url=base_url,
+            timeout=timeout,
+            max_retries=max_retries,
+            default_headers=default_headers,
+            default_query=default_query,
+            http_client=http_client,
+        )
+        return _client
+
+    return _client
+
+
+def _reset_client() -> None:  # type: ignore[reportUnusedFunction]
+    global _client
+
+    _client = None
+
+
+from ._module_client import (
+    beta as beta,
+    chat as chat,
+    audio as audio,
+    files as files,
+    images as images,
+    models as models,
+    batches as batches,
+    uploads as uploads,
+    responses as responses,
+    embeddings as embeddings,
+    completions as completions,
+    fine_tuning as fine_tuning,
+    moderations as moderations,
+    vector_stores as vector_stores,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/__main__.py b/.venv/lib/python3.12/site-packages/openai/__main__.py
new file mode 100644
index 00000000..4e28416e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/__main__.py
@@ -0,0 +1,3 @@
+from .cli import main
+
+main()
diff --git a/.venv/lib/python3.12/site-packages/openai/_base_client.py b/.venv/lib/python3.12/site-packages/openai/_base_client.py
new file mode 100644
index 00000000..f31e5af5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_base_client.py
@@ -0,0 +1,2000 @@
+from __future__ import annotations
+
+import sys
+import json
+import time
+import uuid
+import email
+import asyncio
+import inspect
+import logging
+import platform
+import email.utils
+from types import TracebackType
+from random import random
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Type,
+    Union,
+    Generic,
+    Mapping,
+    TypeVar,
+    Iterable,
+    Iterator,
+    Optional,
+    Generator,
+    AsyncIterator,
+    cast,
+    overload,
+)
+from typing_extensions import Literal, override, get_origin
+
+import anyio
+import httpx
+import distro
+import pydantic
+from httpx import URL
+from pydantic import PrivateAttr
+
+from . import _exceptions
+from ._qs import Querystring
+from ._files import to_httpx_files, async_to_httpx_files
+from ._types import (
+    NOT_GIVEN,
+    Body,
+    Omit,
+    Query,
+    Headers,
+    Timeout,
+    NotGiven,
+    ResponseT,
+    AnyMapping,
+    PostParser,
+    RequestFiles,
+    HttpxSendArgs,
+    RequestOptions,
+    HttpxRequestFiles,
+    ModelBuilderProtocol,
+)
+from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
+from ._compat import PYDANTIC_V2, model_copy, model_dump
+from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
+from ._response import (
+    APIResponse,
+    BaseAPIResponse,
+    AsyncAPIResponse,
+    extract_response_type,
+)
+from ._constants import (
+    DEFAULT_TIMEOUT,
+    MAX_RETRY_DELAY,
+    DEFAULT_MAX_RETRIES,
+    INITIAL_RETRY_DELAY,
+    RAW_RESPONSE_HEADER,
+    OVERRIDE_CAST_TO_HEADER,
+    DEFAULT_CONNECTION_LIMITS,
+)
+from ._streaming import Stream, SSEDecoder, AsyncStream, SSEBytesDecoder
+from ._exceptions import (
+    APIStatusError,
+    APITimeoutError,
+    APIConnectionError,
+    APIResponseValidationError,
+)
+from ._legacy_response import LegacyAPIResponse
+
+log: logging.Logger = logging.getLogger(__name__)
+log.addFilter(SensitiveHeadersFilter())
+
+# TODO: make base page type vars covariant
+SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]")
+AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]")
+
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+
+_StreamT = TypeVar("_StreamT", bound=Stream[Any])
+_AsyncStreamT = TypeVar("_AsyncStreamT", bound=AsyncStream[Any])
+
+if TYPE_CHECKING:
+    from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
+else:
+    try:
+        from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
+    except ImportError:
+        # taken from https://github.com/encode/httpx/blob/3ba5fe0d7ac70222590e759c31442b1cab263791/httpx/_config.py#L366
+        HTTPX_DEFAULT_TIMEOUT = Timeout(5.0)
+
+
+class PageInfo:
+    """Stores the necessary information to build the request to retrieve the next page.
+
+    Either `url` or `params` must be set.
+    """
+
+    url: URL | NotGiven
+    params: Query | NotGiven
+
+    @overload
+    def __init__(
+        self,
+        *,
+        url: URL,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        params: Query,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        url: URL | NotGiven = NOT_GIVEN,
+        params: Query | NotGiven = NOT_GIVEN,
+    ) -> None:
+        self.url = url
+        self.params = params
+
+    @override
+    def __repr__(self) -> str:
+        if self.url:
+            return f"{self.__class__.__name__}(url={self.url})"
+        return f"{self.__class__.__name__}(params={self.params})"
+
+
+class BasePage(GenericModel, Generic[_T]):
+    """
+    Defines the core interface for pagination.
+
+    Type Args:
+        _T: The pydantic model that represents an item in the response.
+
+    Methods:
+        has_next_page(): Check if there is another page available
+        next_page_info(): Get the necessary information to make a request for the next page
+    """
+
+    _options: FinalRequestOptions = PrivateAttr()
+    _model: Type[_T] = PrivateAttr()
+
+    def has_next_page(self) -> bool:
+        items = self._get_page_items()
+        if not items:
+            return False
+        return self.next_page_info() is not None
+
+    def next_page_info(self) -> Optional[PageInfo]: ...
+
+    def _get_page_items(self) -> Iterable[_T]:  # type: ignore[empty-body]
+        ...
+
+    def _params_from_url(self, url: URL) -> httpx.QueryParams:
+        # TODO: do we have to preprocess params here?
+        return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params)
+
+    def _info_to_options(self, info: PageInfo) -> FinalRequestOptions:
+        options = model_copy(self._options)
+        options._strip_raw_response_header()
+
+        if not isinstance(info.params, NotGiven):
+            options.params = {**options.params, **info.params}
+            return options
+
+        if not isinstance(info.url, NotGiven):
+            params = self._params_from_url(info.url)
+            url = info.url.copy_with(params=params)
+            options.params = dict(url.params)
+            options.url = str(url)
+            return options
+
+        raise ValueError("Unexpected PageInfo state")
+
+
+class BaseSyncPage(BasePage[_T], Generic[_T]):
+    _client: SyncAPIClient = pydantic.PrivateAttr()
+
+    def _set_private_attributes(
+        self,
+        client: SyncAPIClient,
+        model: Type[_T],
+        options: FinalRequestOptions,
+    ) -> None:
+        if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+            self.__pydantic_private__ = {}
+
+        self._model = model
+        self._client = client
+        self._options = options
+
+    # Pydantic uses a custom `__iter__` method to support casting BaseModels
+    # to dictionaries. e.g. dict(model).
+    # As we want to support `for item in page`, this is inherently incompatible
+    # with the default pydantic behaviour. It is not possible to support both
+    # use cases at once. Fortunately, this is not a big deal as all other pydantic
+    # methods should continue to work as expected as there is an alternative method
+    # to cast a model to a dictionary, model.dict(), which is used internally
+    # by pydantic.
+    def __iter__(self) -> Iterator[_T]:  # type: ignore
+        for page in self.iter_pages():
+            for item in page._get_page_items():
+                yield item
+
+    def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]:
+        page = self
+        while True:
+            yield page
+            if page.has_next_page():
+                page = page.get_next_page()
+            else:
+                return
+
+    def get_next_page(self: SyncPageT) -> SyncPageT:
+        info = self.next_page_info()
+        if not info:
+            raise RuntimeError(
+                "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`."
+            )
+
+        options = self._info_to_options(info)
+        return self._client._request_api_list(self._model, page=self.__class__, options=options)
+
+
+class AsyncPaginator(Generic[_T, AsyncPageT]):
+    def __init__(
+        self,
+        client: AsyncAPIClient,
+        options: FinalRequestOptions,
+        page_cls: Type[AsyncPageT],
+        model: Type[_T],
+    ) -> None:
+        self._model = model
+        self._client = client
+        self._options = options
+        self._page_cls = page_cls
+
+    def __await__(self) -> Generator[Any, None, AsyncPageT]:
+        return self._get_page().__await__()
+
+    async def _get_page(self) -> AsyncPageT:
+        def _parser(resp: AsyncPageT) -> AsyncPageT:
+            resp._set_private_attributes(
+                model=self._model,
+                options=self._options,
+                client=self._client,
+            )
+            return resp
+
+        self._options.post_parser = _parser
+
+        return await self._client.request(self._page_cls, self._options)
+
+    async def __aiter__(self) -> AsyncIterator[_T]:
+        # https://github.com/microsoft/pyright/issues/3464
+        page = cast(
+            AsyncPageT,
+            await self,  # type: ignore
+        )
+        async for item in page:
+            yield item
+
+
+class BaseAsyncPage(BasePage[_T], Generic[_T]):
+    _client: AsyncAPIClient = pydantic.PrivateAttr()
+
+    def _set_private_attributes(
+        self,
+        model: Type[_T],
+        client: AsyncAPIClient,
+        options: FinalRequestOptions,
+    ) -> None:
+        if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+            self.__pydantic_private__ = {}
+
+        self._model = model
+        self._client = client
+        self._options = options
+
+    async def __aiter__(self) -> AsyncIterator[_T]:
+        async for page in self.iter_pages():
+            for item in page._get_page_items():
+                yield item
+
+    async def iter_pages(self: AsyncPageT) -> AsyncIterator[AsyncPageT]:
+        page = self
+        while True:
+            yield page
+            if page.has_next_page():
+                page = await page.get_next_page()
+            else:
+                return
+
+    async def get_next_page(self: AsyncPageT) -> AsyncPageT:
+        info = self.next_page_info()
+        if not info:
+            raise RuntimeError(
+                "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`."
+            )
+
+        options = self._info_to_options(info)
+        return await self._client._request_api_list(self._model, page=self.__class__, options=options)
+
+
+_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient])
+_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]])
+
+
+class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
+    _client: _HttpxClientT
+    _version: str
+    _base_url: URL
+    max_retries: int
+    timeout: Union[float, Timeout, None]
+    _strict_response_validation: bool
+    _idempotency_header: str | None
+    _default_stream_cls: type[_DefaultStreamT] | None = None
+
+    def __init__(
+        self,
+        *,
+        version: str,
+        base_url: str | URL,
+        _strict_response_validation: bool,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        timeout: float | Timeout | None = DEFAULT_TIMEOUT,
+        custom_headers: Mapping[str, str] | None = None,
+        custom_query: Mapping[str, object] | None = None,
+    ) -> None:
+        self._version = version
+        self._base_url = self._enforce_trailing_slash(URL(base_url))
+        self.max_retries = max_retries
+        self.timeout = timeout
+        self._custom_headers = custom_headers or {}
+        self._custom_query = custom_query or {}
+        self._strict_response_validation = _strict_response_validation
+        self._idempotency_header = None
+        self._platform: Platform | None = None
+
+        if max_retries is None:  # pyright: ignore[reportUnnecessaryComparison]
+            raise TypeError(
+                "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `openai.DEFAULT_MAX_RETRIES`"
+            )
+
+    def _enforce_trailing_slash(self, url: URL) -> URL:
+        if url.raw_path.endswith(b"/"):
+            return url
+        return url.copy_with(raw_path=url.raw_path + b"/")
+
+    def _make_status_error_from_response(
+        self,
+        response: httpx.Response,
+    ) -> APIStatusError:
+        if response.is_closed and not response.is_stream_consumed:
+            # We can't read the response body as it has been closed
+            # before it was read. This can happen if an event hook
+            # raises a status error.
+            body = None
+            err_msg = f"Error code: {response.status_code}"
+        else:
+            err_text = response.text.strip()
+            body = err_text
+
+            try:
+                body = json.loads(err_text)
+                err_msg = f"Error code: {response.status_code} - {body}"
+            except Exception:
+                err_msg = err_text or f"Error code: {response.status_code}"
+
+        return self._make_status_error(err_msg, body=body, response=response)
+
+    def _make_status_error(
+        self,
+        err_msg: str,
+        *,
+        body: object,
+        response: httpx.Response,
+    ) -> _exceptions.APIStatusError:
+        raise NotImplementedError()
+
+    def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers:
+        custom_headers = options.headers or {}
+        headers_dict = _merge_mappings(self.default_headers, custom_headers)
+        self._validate_headers(headers_dict, custom_headers)
+
+        # headers are case-insensitive while dictionaries are not.
+        headers = httpx.Headers(headers_dict)
+
+        idempotency_header = self._idempotency_header
+        if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
+            headers[idempotency_header] = options.idempotency_key or self._idempotency_key()
+
+        # Don't set these headers if they were already set or removed by the caller. We check
+        # `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case.
+        lower_custom_headers = [header.lower() for header in custom_headers]
+        if "x-stainless-retry-count" not in lower_custom_headers:
+            headers["x-stainless-retry-count"] = str(retries_taken)
+        if "x-stainless-read-timeout" not in lower_custom_headers:
+            timeout = self.timeout if isinstance(options.timeout, NotGiven) else options.timeout
+            if isinstance(timeout, Timeout):
+                timeout = timeout.read
+            if timeout is not None:
+                headers["x-stainless-read-timeout"] = str(timeout)
+
+        return headers
+
+    def _prepare_url(self, url: str) -> URL:
+        """
+        Merge a URL argument together with any 'base_url' on the client,
+        to create the URL used for the outgoing request.
+        """
+        # Copied from httpx's `_merge_url` method.
+        merge_url = URL(url)
+        if merge_url.is_relative_url:
+            merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/")
+            return self.base_url.copy_with(raw_path=merge_raw_path)
+
+        return merge_url
+
+    def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder:
+        return SSEDecoder()
+
+    def _build_request(
+        self,
+        options: FinalRequestOptions,
+        *,
+        retries_taken: int = 0,
+    ) -> httpx.Request:
+        if log.isEnabledFor(logging.DEBUG):
+            log.debug("Request options: %s", model_dump(options, exclude_unset=True))
+
+        kwargs: dict[str, Any] = {}
+
+        json_data = options.json_data
+        if options.extra_json is not None:
+            if json_data is None:
+                json_data = cast(Body, options.extra_json)
+            elif is_mapping(json_data):
+                json_data = _merge_mappings(json_data, options.extra_json)
+            else:
+                raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`")
+
+        headers = self._build_headers(options, retries_taken=retries_taken)
+        params = _merge_mappings(self.default_query, options.params)
+        content_type = headers.get("Content-Type")
+        files = options.files
+
+        # If the given Content-Type header is multipart/form-data then it
+        # has to be removed so that httpx can generate the header with
+        # additional information for us as it has to be in this form
+        # for the server to be able to correctly parse the request:
+        # multipart/form-data; boundary=---abc--
+        if content_type is not None and content_type.startswith("multipart/form-data"):
+            if "boundary" not in content_type:
+                # only remove the header if the boundary hasn't been explicitly set
+                # as the caller doesn't want httpx to come up with their own boundary
+                headers.pop("Content-Type")
+
+            # As we are now sending multipart/form-data instead of application/json
+            # we need to tell httpx to use it, https://www.python-httpx.org/advanced/clients/#multipart-file-encoding
+            if json_data:
+                if not is_dict(json_data):
+                    raise TypeError(
+                        f"Expected query input to be a dictionary for multipart requests but got {type(json_data)} instead."
+                    )
+                kwargs["data"] = self._serialize_multipartform(json_data)
+
+            # httpx determines whether or not to send a "multipart/form-data"
+            # request based on the truthiness of the "files" argument.
+            # This gets around that issue by generating a dict value that
+            # evaluates to true.
+            #
+            # https://github.com/encode/httpx/discussions/2399#discussioncomment-3814186
+            if not files:
+                files = cast(HttpxRequestFiles, ForceMultipartDict())
+
+        prepared_url = self._prepare_url(options.url)
+        if "_" in prepared_url.host:
+            # work around https://github.com/encode/httpx/discussions/2880
+            kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
+
+        # TODO: report this error to httpx
+        return self._client.build_request(  # pyright: ignore[reportUnknownMemberType]
+            headers=headers,
+            timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout,
+            method=options.method,
+            url=prepared_url,
+            # the `Query` type that we use is incompatible with qs'
+            # `Params` type as it needs to be typed as `Mapping[str, object]`
+            # so that passing a `TypedDict` doesn't cause an error.
+            # https://github.com/microsoft/pyright/issues/3526#event-6715453066
+            params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
+            json=json_data if is_given(json_data) else None,
+            files=files,
+            **kwargs,
+        )
+
+    def _serialize_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
+        items = self.qs.stringify_items(
+            # TODO: type ignore is required as stringify_items is well typed but we can't be
+            # well typed without heavy validation.
+            data,  # type: ignore
+            array_format="brackets",
+        )
+        serialized: dict[str, object] = {}
+        for key, value in items:
+            existing = serialized.get(key)
+
+            if not existing:
+                serialized[key] = value
+                continue
+
+            # If a value has already been set for this key then that
+            # means we're sending data like `array[]=[1, 2, 3]` and we
+            # need to tell httpx that we want to send multiple values with
+            # the same key which is done by using a list or a tuple.
+            #
+            # Note: 2d arrays should never result in the same key at both
+            # levels so it's safe to assume that if the value is a list,
+            # it was because we changed it to be a list.
+            if is_list(existing):
+                existing.append(value)
+            else:
+                serialized[key] = [existing, value]
+
+        return serialized
+
+    def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalRequestOptions) -> type[ResponseT]:
+        if not is_given(options.headers):
+            return cast_to
+
+        # make a copy of the headers so we don't mutate user-input
+        headers = dict(options.headers)
+
+        # we internally support defining a temporary header to override the
+        # default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response`
+        # see _response.py for implementation details
+        override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN)
+        if is_given(override_cast_to):
+            options.headers = headers
+            return cast(Type[ResponseT], override_cast_to)
+
+        return cast_to
+
+    def _should_stream_response_body(self, request: httpx.Request) -> bool:
+        return request.headers.get(RAW_RESPONSE_HEADER) == "stream"  # type: ignore[no-any-return]
+
+    def _process_response_data(
+        self,
+        *,
+        data: object,
+        cast_to: type[ResponseT],
+        response: httpx.Response,
+    ) -> ResponseT:
+        if data is None:
+            return cast(ResponseT, None)
+
+        if cast_to is object:
+            return cast(ResponseT, data)
+
+        try:
+            if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol):
+                return cast(ResponseT, cast_to.build(response=response, data=data))
+
+            if self._strict_response_validation:
+                return cast(ResponseT, validate_type(type_=cast_to, value=data))
+
+            return cast(ResponseT, construct_type(type_=cast_to, value=data))
+        except pydantic.ValidationError as err:
+            raise APIResponseValidationError(response=response, body=data) from err
+
+    @property
+    def qs(self) -> Querystring:
+        return Querystring()
+
+    @property
+    def custom_auth(self) -> httpx.Auth | None:
+        return None
+
+    @property
+    def auth_headers(self) -> dict[str, str]:
+        return {}
+
+    @property
+    def default_headers(self) -> dict[str, str | Omit]:
+        return {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+            "User-Agent": self.user_agent,
+            **self.platform_headers(),
+            **self.auth_headers,
+            **self._custom_headers,
+        }
+
+    @property
+    def default_query(self) -> dict[str, object]:
+        return {
+            **self._custom_query,
+        }
+
+    def _validate_headers(
+        self,
+        headers: Headers,  # noqa: ARG002
+        custom_headers: Headers,  # noqa: ARG002
+    ) -> None:
+        """Validate the given default headers and custom headers.
+
+        Does nothing by default.
+        """
+        return
+
+    @property
+    def user_agent(self) -> str:
+        return f"{self.__class__.__name__}/Python {self._version}"
+
+    @property
+    def base_url(self) -> URL:
+        return self._base_url
+
+    @base_url.setter
+    def base_url(self, url: URL | str) -> None:
+        self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url))
+
+    def platform_headers(self) -> Dict[str, str]:
+        # the actual implementation is in a separate `lru_cache` decorated
+        # function because adding `lru_cache` to methods will leak memory
+        # https://github.com/python/cpython/issues/88476
+        return platform_headers(self._version, platform=self._platform)
+
+    def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None:
+        """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified.
+
+        About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
+        See also  https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax
+        """
+        if response_headers is None:
+            return None
+
+        # First, try the non-standard `retry-after-ms` header for milliseconds,
+        # which is more precise than integer-seconds `retry-after`
+        try:
+            retry_ms_header = response_headers.get("retry-after-ms", None)
+            return float(retry_ms_header) / 1000
+        except (TypeError, ValueError):
+            pass
+
+        # Next, try parsing `retry-after` header as seconds (allowing nonstandard floats).
+        retry_header = response_headers.get("retry-after")
+        try:
+            # note: the spec indicates that this should only ever be an integer
+            # but if someone sends a float there's no reason for us to not respect it
+            return float(retry_header)
+        except (TypeError, ValueError):
+            pass
+
+        # Last, try parsing `retry-after` as a date.
+        retry_date_tuple = email.utils.parsedate_tz(retry_header)
+        if retry_date_tuple is None:
+            return None
+
+        retry_date = email.utils.mktime_tz(retry_date_tuple)
+        return float(retry_date - time.time())
+
+    def _calculate_retry_timeout(
+        self,
+        remaining_retries: int,
+        options: FinalRequestOptions,
+        response_headers: Optional[httpx.Headers] = None,
+    ) -> float:
+        max_retries = options.get_max_retries(self.max_retries)
+
+        # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
+        retry_after = self._parse_retry_after_header(response_headers)
+        if retry_after is not None and 0 < retry_after <= 60:
+            return retry_after
+
+        # Also cap retry count to 1000 to avoid any potential overflows with `pow`
+        nb_retries = min(max_retries - remaining_retries, 1000)
+
+        # Apply exponential backoff, but not more than the max.
+        sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY)
+
+        # Apply some jitter: shave up to 25% off the computed delay.
+        jitter = 1 - 0.25 * random()
+        timeout = sleep_seconds * jitter
+        return timeout if timeout >= 0 else 0
+
+    def _should_retry(self, response: httpx.Response) -> bool:
+        # Note: this is not a standard header
+        should_retry_header = response.headers.get("x-should-retry")
+
+        # If the server explicitly says whether or not to retry, obey.
+        if should_retry_header == "true":
+            log.debug("Retrying as header `x-should-retry` is set to `true`")
+            return True
+        if should_retry_header == "false":
+            log.debug("Not retrying as header `x-should-retry` is set to `false`")
+            return False
+
+        # Retry on request timeouts.
+        if response.status_code == 408:
+            log.debug("Retrying due to status code %i", response.status_code)
+            return True
+
+        # Retry on lock timeouts.
+        if response.status_code == 409:
+            log.debug("Retrying due to status code %i", response.status_code)
+            return True
+
+        # Retry on rate limits.
+        if response.status_code == 429:
+            log.debug("Retrying due to status code %i", response.status_code)
+            return True
+
+        # Retry internal errors.
+        if response.status_code >= 500:
+            log.debug("Retrying due to status code %i", response.status_code)
+            return True
+
+        log.debug("Not retrying")
+        return False
+
+    def _idempotency_key(self) -> str:
+        return f"stainless-python-retry-{uuid.uuid4()}"
+
+
+class _DefaultHttpxClient(httpx.Client):
+    def __init__(self, **kwargs: Any) -> None:
+        kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
+        kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
+        kwargs.setdefault("follow_redirects", True)
+        super().__init__(**kwargs)
+
+
+if TYPE_CHECKING:
+    DefaultHttpxClient = httpx.Client
+    """An alias to `httpx.Client` that provides the same defaults that this SDK
+    uses internally.
+
+    This is useful because overriding the `http_client` with your own instance of
+    `httpx.Client` will result in httpx's defaults being used, not ours.
+    """
+else:
+    DefaultHttpxClient = _DefaultHttpxClient
+
+
+class SyncHttpxClientWrapper(DefaultHttpxClient):
+    def __del__(self) -> None:
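+        # Best-effort cleanup: close the underlying connection pool when this
+        # client is garbage collected, swallowing any errors (e.g. during
+        # interpreter shutdown).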
+        if self.is_closed:
+            return
+
+        try:
+            self.close()
+        except Exception:
+            pass
+
+
+class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
+    _client: httpx.Client
+    _default_stream_cls: type[Stream[Any]] | None = None
+
+    def __init__(
+        self,
+        *,
+        version: str,
+        base_url: str | URL,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        http_client: httpx.Client | None = None,
+        custom_headers: Mapping[str, str] | None = None,
+        custom_query: Mapping[str, object] | None = None,
+        _strict_response_validation: bool,
+    ) -> None:
+        if not is_given(timeout):
+            # if the user passed in a custom http client with a non-default
+            # timeout set then we use that timeout.
+            #
+            # note: there is an edge case here where the user passes in a client
+            # where they've explicitly set the timeout to match the default timeout
+            # as this check is structural, meaning that we'll think they didn't
+            # pass in a timeout and will ignore it
+            if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT:
+                timeout = http_client.timeout
+            else:
+                timeout = DEFAULT_TIMEOUT
+
+        if http_client is not None and not isinstance(http_client, httpx.Client):  # pyright: ignore[reportUnnecessaryIsInstance]
+            raise TypeError(
+                f"Invalid `http_client` argument; Expected an instance of `httpx.Client` but got {type(http_client)}"
+            )
+
+        super().__init__(
+            version=version,
+            # cast to a valid type because mypy doesn't understand our type narrowing
+            timeout=cast(Timeout, timeout),
+            base_url=base_url,
+            max_retries=max_retries,
+            custom_query=custom_query,
+            custom_headers=custom_headers,
+            _strict_response_validation=_strict_response_validation,
+        )
+        self._client = http_client or SyncHttpxClientWrapper(
+            base_url=base_url,
+            # cast to a valid type because mypy doesn't understand our type narrowing
+            timeout=cast(Timeout, timeout),
+        )
+
+    def is_closed(self) -> bool:
+        return self._client.is_closed
+
+    def close(self) -> None:
+        """Close the underlying HTTPX client.
+
+        The client will *not* be usable after this.
+        """
+        # If an error is thrown while constructing a client, self._client
+        # may not be present
+        if hasattr(self, "_client"):
+            self._client.close()
+
+    def __enter__(self: _T) -> _T:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.close()
+
+    def _prepare_options(
+        self,
+        options: FinalRequestOptions,  # noqa: ARG002
+    ) -> FinalRequestOptions:
+        """Hook for mutating the given options"""
+        return options
+
+    def _prepare_request(
+        self,
+        request: httpx.Request,  # noqa: ARG002
+    ) -> None:
+        """This method is used as a callback for mutating the `Request` object
+        after it has been constructed.
+        This is useful for cases where you want to add certain headers based off of
+        the request properties, e.g. `url`, `method` etc.
+        """
+        return None
+
+    @overload
+    def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        remaining_retries: Optional[int] = None,
+        *,
+        stream: Literal[True],
+        stream_cls: Type[_StreamT],
+    ) -> _StreamT: ...
+
+    @overload
+    def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        remaining_retries: Optional[int] = None,
+        *,
+        stream: Literal[False] = False,
+    ) -> ResponseT: ...
+
+    @overload
+    def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        remaining_retries: Optional[int] = None,
+        *,
+        stream: bool = False,
+        stream_cls: Type[_StreamT] | None = None,
+    ) -> ResponseT | _StreamT: ...
+
+    def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        remaining_retries: Optional[int] = None,
+        *,
+        stream: bool = False,
+        stream_cls: type[_StreamT] | None = None,
+    ) -> ResponseT | _StreamT:
+        if remaining_retries is not None:
+            retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
+        else:
+            retries_taken = 0
+
+        return self._request(
+            cast_to=cast_to,
+            options=options,
+            stream=stream,
+            stream_cls=stream_cls,
+            retries_taken=retries_taken,
+        )
+
+    def _request(
+        self,
+        *,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        retries_taken: int,
+        stream: bool,
+        stream_cls: type[_StreamT] | None,
+    ) -> ResponseT | _StreamT:
+        # create a copy of the options we were given so that if the
+        # options are mutated later & we then retry, the retries are
+        # given the original options
+        input_options = model_copy(options)
+
+        cast_to = self._maybe_override_cast_to(cast_to, options)
+        options = self._prepare_options(options)
+
+        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+        request = self._build_request(options, retries_taken=retries_taken)
+        self._prepare_request(request)
+
+        kwargs: HttpxSendArgs = {}
+        if self.custom_auth is not None:
+            kwargs["auth"] = self.custom_auth
+
+        log.debug("Sending HTTP Request: %s %s", request.method, request.url)
+
+        try:
+            response = self._client.send(
+                request,
+                stream=stream or self._should_stream_response_body(request=request),
+                **kwargs,
+            )
+        except httpx.TimeoutException as err:
+            log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+            if remaining_retries > 0:
+                return self._retry_request(
+                    input_options,
+                    cast_to,
+                    retries_taken=retries_taken,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    response_headers=None,
+                )
+
+            log.debug("Raising timeout error")
+            raise APITimeoutError(request=request) from err
+        except Exception as err:
+            log.debug("Encountered Exception", exc_info=True)
+
+            if remaining_retries > 0:
+                return self._retry_request(
+                    input_options,
+                    cast_to,
+                    retries_taken=retries_taken,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    response_headers=None,
+                )
+
+            log.debug("Raising connection error")
+            raise APIConnectionError(request=request) from err
+
+        log.debug(
+            'HTTP Response: %s %s "%i %s" %s',
+            request.method,
+            request.url,
+            response.status_code,
+            response.reason_phrase,
+            response.headers,
+        )
+        log.debug("request_id: %s", response.headers.get("x-request-id"))
+
+        try:
+            response.raise_for_status()
+        except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status code
+            log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+            if remaining_retries > 0 and self._should_retry(err.response):
+                err.response.close()
+                return self._retry_request(
+                    input_options,
+                    cast_to,
+                    retries_taken=retries_taken,
+                    response_headers=err.response.headers,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                )
+
+            # If the response is streamed then we need to explicitly read the response
+            # to completion before attempting to access the response text.
+            if not err.response.is_closed:
+                err.response.read()
+
+            log.debug("Re-raising status error")
+            raise self._make_status_error_from_response(err.response) from None
+
+        return self._process_response(
+            cast_to=cast_to,
+            options=options,
+            response=response,
+            stream=stream,
+            stream_cls=stream_cls,
+            retries_taken=retries_taken,
+        )
+
+    def _retry_request(
+        self,
+        options: FinalRequestOptions,
+        cast_to: Type[ResponseT],
+        *,
+        retries_taken: int,
+        response_headers: httpx.Headers | None,
+        stream: bool,
+        stream_cls: type[_StreamT] | None,
+    ) -> ResponseT | _StreamT:
+        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+        if remaining_retries == 1:
+            log.debug("1 retry left")
+        else:
+            log.debug("%i retries left", remaining_retries)
+
+        timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+        log.info("Retrying request to %s in %f seconds", options.url, timeout)
+
+        # In a synchronous context we are blocking the entire thread. It is up to the library user to run the client
+        # in a different thread if necessary.
+        time.sleep(timeout)
+
+        return self._request(
+            options=options,
+            cast_to=cast_to,
+            retries_taken=retries_taken + 1,
+            stream=stream,
+            stream_cls=stream_cls,
+        )
+
+    def _process_response(
+        self,
+        *,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        response: httpx.Response,
+        stream: bool,
+        stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
+        retries_taken: int = 0,
+    ) -> ResponseT:
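+        # Dispatch on how the caller asked for the response to be returned: the
+        # legacy raw-response wrapper, a custom `APIResponse` subclass, the raw
+        # `httpx.Response`, or (the default) the parsed `cast_to` type.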
+        if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
+            return cast(
+                ResponseT,
+                LegacyAPIResponse(
+                    raw=response,
+                    client=self,
+                    cast_to=cast_to,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    options=options,
+                    retries_taken=retries_taken,
+                ),
+            )
+
+        origin = get_origin(cast_to) or cast_to
+
+        if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+            if not issubclass(origin, APIResponse):
+                raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
+
+            response_cls = cast("type[BaseAPIResponse[Any]]", cast_to)
+            return cast(
+                ResponseT,
+                response_cls(
+                    raw=response,
+                    client=self,
+                    cast_to=extract_response_type(response_cls),
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    options=options,
+                    retries_taken=retries_taken,
+                ),
+            )
+
+        if cast_to == httpx.Response:
+            return cast(ResponseT, response)
+
+        api_response = APIResponse(
+            raw=response,
+            client=self,
+            cast_to=cast("type[ResponseT]", cast_to),  # pyright: ignore[reportUnnecessaryCast]
+            stream=stream,
+            stream_cls=stream_cls,
+            options=options,
+            retries_taken=retries_taken,
+        )
+        if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
+            return cast(ResponseT, api_response)
+
+        return api_response.parse()
+
+    def _request_api_list(
+        self,
+        model: Type[object],
+        page: Type[SyncPageT],
+        options: FinalRequestOptions,
+    ) -> SyncPageT:
+        def _parser(resp: SyncPageT) -> SyncPageT:
+            resp._set_private_attributes(
+                client=self,
+                model=model,
+                options=options,
+            )
+            return resp
+
+        options.post_parser = _parser
+
+        return self.request(page, options, stream=False)
+
+    @overload
+    def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: Literal[False] = False,
+    ) -> ResponseT: ...
+
+    @overload
+    def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: Literal[True],
+        stream_cls: type[_StreamT],
+    ) -> _StreamT: ...
+
+    @overload
+    def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: bool,
+        stream_cls: type[_StreamT] | None = None,
+    ) -> ResponseT | _StreamT: ...
+
+    def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: bool = False,
+        stream_cls: type[_StreamT] | None = None,
+    ) -> ResponseT | _StreamT:
+        opts = FinalRequestOptions.construct(method="get", url=path, **options)
+        # cast is required because mypy complains about returning Any even though
+        # it understands the type variables
+        return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
+
+    @overload
+    def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+        files: RequestFiles | None = None,
+        stream: Literal[False] = False,
+    ) -> ResponseT: ...
+
+    @overload
+    def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+        files: RequestFiles | None = None,
+        stream: Literal[True],
+        stream_cls: type[_StreamT],
+    ) -> _StreamT: ...
+
+    @overload
+    def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+        files: RequestFiles | None = None,
+        stream: bool,
+        stream_cls: type[_StreamT] | None = None,
+    ) -> ResponseT | _StreamT: ...
+
+    def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+        files: RequestFiles | None = None,
+        stream: bool = False,
+        stream_cls: type[_StreamT] | None = None,
+    ) -> ResponseT | _StreamT:
+        opts = FinalRequestOptions.construct(
+            method="post", url=path, json_data=body, files=to_httpx_files(files), **options
+        )
+        return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
+
+    def patch(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+    ) -> ResponseT:
+        opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+        return self.request(cast_to, opts)
+
+    def put(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        files: RequestFiles | None = None,
+        options: RequestOptions = {},
+    ) -> ResponseT:
+        opts = FinalRequestOptions.construct(
+            method="put", url=path, json_data=body, files=to_httpx_files(files), **options
+        )
+        return self.request(cast_to, opts)
+
+    def delete(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+    ) -> ResponseT:
+        opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+        return self.request(cast_to, opts)
+
+    def get_api_list(
+        self,
+        path: str,
+        *,
+        model: Type[object],
+        page: Type[SyncPageT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+        method: str = "get",
+    ) -> SyncPageT:
+        opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options)
+        return self._request_api_list(model, page, opts)
+
+
+class _DefaultAsyncHttpxClient(httpx.AsyncClient):
+    def __init__(self, **kwargs: Any) -> None:
+        kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
+        kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
+        kwargs.setdefault("follow_redirects", True)
+        super().__init__(**kwargs)
+
+
+if TYPE_CHECKING:
+    DefaultAsyncHttpxClient = httpx.AsyncClient
+    """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK
+    uses internally.
+
+    This is useful because overriding the `http_client` with your own instance of
+    `httpx.AsyncClient` will result in httpx's defaults being used, not ours.
+    """
+else:
+    DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient
+
+
+class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient):
+    def __del__(self) -> None:
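+        # Best-effort cleanup: if an asyncio event loop is still running,
+        # schedule `aclose()` so the connection pool is released when this
+        # client is garbage collected; any errors are swallowed.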
+        if self.is_closed:
+            return
+
+        try:
+            # TODO(someday): support non asyncio runtimes here
+            asyncio.get_running_loop().create_task(self.aclose())
+        except Exception:
+            pass
+
+
+class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
+    _client: httpx.AsyncClient
+    _default_stream_cls: type[AsyncStream[Any]] | None = None
+
+    def __init__(
+        self,
+        *,
+        version: str,
+        base_url: str | URL,
+        _strict_response_validation: bool,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        http_client: httpx.AsyncClient | None = None,
+        custom_headers: Mapping[str, str] | None = None,
+        custom_query: Mapping[str, object] | None = None,
+    ) -> None:
+        if not is_given(timeout):
+            # if the user passed in a custom http client with a non-default
+            # timeout set then we use that timeout.
+            #
+            # note: there is an edge case here where the user passes in a client
+            # where they've explicitly set the timeout to match the default timeout
+            # as this check is structural, meaning that we'll think they didn't
+            # pass in a timeout and will ignore it
+            if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT:
+                timeout = http_client.timeout
+            else:
+                timeout = DEFAULT_TIMEOUT
+
+        if http_client is not None and not isinstance(http_client, httpx.AsyncClient):  # pyright: ignore[reportUnnecessaryIsInstance]
+            raise TypeError(
+                f"Invalid `http_client` argument; Expected an instance of `httpx.AsyncClient` but got {type(http_client)}"
+            )
+
+        super().__init__(
+            version=version,
+            base_url=base_url,
+            # cast to a valid type because mypy doesn't understand our type narrowing
+            timeout=cast(Timeout, timeout),
+            max_retries=max_retries,
+            custom_query=custom_query,
+            custom_headers=custom_headers,
+            _strict_response_validation=_strict_response_validation,
+        )
+        self._client = http_client or AsyncHttpxClientWrapper(
+            base_url=base_url,
+            # cast to a valid type because mypy doesn't understand our type narrowing
+            timeout=cast(Timeout, timeout),
+        )
+
+    def is_closed(self) -> bool:
+        return self._client.is_closed
+
+    async def close(self) -> None:
+        """Close the underlying HTTPX client.
+
+        The client will *not* be usable after this.
+        """
+        await self._client.aclose()
+
+    async def __aenter__(self: _T) -> _T:
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        await self.close()
+
+    async def _prepare_options(
+        self,
+        options: FinalRequestOptions,  # noqa: ARG002
+    ) -> FinalRequestOptions:
+        """Hook for mutating the given options"""
+        return options
+
+    async def _prepare_request(
+        self,
+        request: httpx.Request,  # noqa: ARG002
+    ) -> None:
+        """This method is used as a callback for mutating the `Request` object
+        after it has been constructed.
+        This is useful for cases where you want to add certain headers based off of
+        the request properties, e.g. `url`, `method` etc.
+        """
+        return None
+
+    @overload
+    async def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        *,
+        stream: Literal[False] = False,
+        remaining_retries: Optional[int] = None,
+    ) -> ResponseT: ...
+
+    @overload
+    async def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        *,
+        stream: Literal[True],
+        stream_cls: type[_AsyncStreamT],
+        remaining_retries: Optional[int] = None,
+    ) -> _AsyncStreamT: ...
+
+    @overload
+    async def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        *,
+        stream: bool,
+        stream_cls: type[_AsyncStreamT] | None = None,
+        remaining_retries: Optional[int] = None,
+    ) -> ResponseT | _AsyncStreamT: ...
+
+    async def request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        *,
+        stream: bool = False,
+        stream_cls: type[_AsyncStreamT] | None = None,
+        remaining_retries: Optional[int] = None,
+    ) -> ResponseT | _AsyncStreamT:
+        if remaining_retries is not None:
+            retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
+        else:
+            retries_taken = 0
+
+        return await self._request(
+            cast_to=cast_to,
+            options=options,
+            stream=stream,
+            stream_cls=stream_cls,
+            retries_taken=retries_taken,
+        )
+
+    async def _request(
+        self,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        *,
+        stream: bool,
+        stream_cls: type[_AsyncStreamT] | None,
+        retries_taken: int,
+    ) -> ResponseT | _AsyncStreamT:
+        if self._platform is None:
+            # `get_platform` can make blocking IO calls, so run it in a worker
+            # thread (via `asyncify`) instead of blocking the event loop
+            self._platform = await asyncify(get_platform)()
+
+        # create a copy of the options we were given so that if the
+        # options are mutated later & we then retry, the retries are
+        # given the original options
+        input_options = model_copy(options)
+
+        cast_to = self._maybe_override_cast_to(cast_to, options)
+        options = await self._prepare_options(options)
+
+        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+        request = self._build_request(options, retries_taken=retries_taken)
+        await self._prepare_request(request)
+
+        kwargs: HttpxSendArgs = {}
+        if self.custom_auth is not None:
+            kwargs["auth"] = self.custom_auth
+
+        try:
+            response = await self._client.send(
+                request,
+                stream=stream or self._should_stream_response_body(request=request),
+                **kwargs,
+            )
+        except httpx.TimeoutException as err:
+            log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+            if remaining_retries > 0:
+                return await self._retry_request(
+                    input_options,
+                    cast_to,
+                    retries_taken=retries_taken,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    response_headers=None,
+                )
+
+            log.debug("Raising timeout error")
+            raise APITimeoutError(request=request) from err
+        except Exception as err:
+            log.debug("Encountered Exception", exc_info=True)
+
+            if remaining_retries > 0:
+                return await self._retry_request(
+                    input_options,
+                    cast_to,
+                    retries_taken=retries_taken,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    response_headers=None,
+                )
+
+            log.debug("Raising connection error")
+            raise APIConnectionError(request=request) from err
+
+        log.debug(
+            'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
+        )
+
+        try:
+            response.raise_for_status()
+        except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status codes
+            log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+            if remaining_retries > 0 and self._should_retry(err.response):
+                await err.response.aclose()
+                return await self._retry_request(
+                    input_options,
+                    cast_to,
+                    retries_taken=retries_taken,
+                    response_headers=err.response.headers,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                )
+
+            # If the response is streamed then we need to explicitly read the response
+            # to completion before attempting to access the response text.
+            if not err.response.is_closed:
+                await err.response.aread()
+
+            log.debug("Re-raising status error")
+            raise self._make_status_error_from_response(err.response) from None
+
+        return await self._process_response(
+            cast_to=cast_to,
+            options=options,
+            response=response,
+            stream=stream,
+            stream_cls=stream_cls,
+            retries_taken=retries_taken,
+        )
+
+    async def _retry_request(
+        self,
+        options: FinalRequestOptions,
+        cast_to: Type[ResponseT],
+        *,
+        retries_taken: int,
+        response_headers: httpx.Headers | None,
+        stream: bool,
+        stream_cls: type[_AsyncStreamT] | None,
+    ) -> ResponseT | _AsyncStreamT:
+        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+        if remaining_retries == 1:
+            log.debug("1 retry left")
+        else:
+            log.debug("%i retries left", remaining_retries)
+
+        timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+        log.info("Retrying request to %s in %f seconds", options.url, timeout)
+
+        await anyio.sleep(timeout)
+
+        return await self._request(
+            options=options,
+            cast_to=cast_to,
+            retries_taken=retries_taken + 1,
+            stream=stream,
+            stream_cls=stream_cls,
+        )
+
+    async def _process_response(
+        self,
+        *,
+        cast_to: Type[ResponseT],
+        options: FinalRequestOptions,
+        response: httpx.Response,
+        stream: bool,
+        stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
+        retries_taken: int = 0,
+    ) -> ResponseT:
+        if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
+            return cast(
+                ResponseT,
+                LegacyAPIResponse(
+                    raw=response,
+                    client=self,
+                    cast_to=cast_to,
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    options=options,
+                    retries_taken=retries_taken,
+                ),
+            )
+
+        origin = get_origin(cast_to) or cast_to
+
+        if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+            if not issubclass(origin, AsyncAPIResponse):
+                raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")
+
+            response_cls = cast("type[BaseAPIResponse[Any]]", cast_to)
+            return cast(
+                "ResponseT",
+                response_cls(
+                    raw=response,
+                    client=self,
+                    cast_to=extract_response_type(response_cls),
+                    stream=stream,
+                    stream_cls=stream_cls,
+                    options=options,
+                    retries_taken=retries_taken,
+                ),
+            )
+
+        if cast_to == httpx.Response:
+            return cast(ResponseT, response)
+
+        api_response = AsyncAPIResponse(
+            raw=response,
+            client=self,
+            cast_to=cast("type[ResponseT]", cast_to),  # pyright: ignore[reportUnnecessaryCast]
+            stream=stream,
+            stream_cls=stream_cls,
+            options=options,
+            retries_taken=retries_taken,
+        )
+        if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
+            return cast(ResponseT, api_response)
+
+        return await api_response.parse()
+
+    def _request_api_list(
+        self,
+        model: Type[_T],
+        page: Type[AsyncPageT],
+        options: FinalRequestOptions,
+    ) -> AsyncPaginator[_T, AsyncPageT]:
+        return AsyncPaginator(client=self, options=options, page_cls=page, model=model)
+
+    @overload
+    async def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: Literal[False] = False,
+    ) -> ResponseT: ...
+
+    @overload
+    async def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: Literal[True],
+        stream_cls: type[_AsyncStreamT],
+    ) -> _AsyncStreamT: ...
+
+    @overload
+    async def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: bool,
+        stream_cls: type[_AsyncStreamT] | None = None,
+    ) -> ResponseT | _AsyncStreamT: ...
+
+    async def get(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        options: RequestOptions = {},
+        stream: bool = False,
+        stream_cls: type[_AsyncStreamT] | None = None,
+    ) -> ResponseT | _AsyncStreamT:
+        opts = FinalRequestOptions.construct(method="get", url=path, **options)
+        return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
+
+    @overload
+    async def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        files: RequestFiles | None = None,
+        options: RequestOptions = {},
+        stream: Literal[False] = False,
+    ) -> ResponseT: ...
+
+    @overload
+    async def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        files: RequestFiles | None = None,
+        options: RequestOptions = {},
+        stream: Literal[True],
+        stream_cls: type[_AsyncStreamT],
+    ) -> _AsyncStreamT: ...
+
+    @overload
+    async def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        files: RequestFiles | None = None,
+        options: RequestOptions = {},
+        stream: bool,
+        stream_cls: type[_AsyncStreamT] | None = None,
+    ) -> ResponseT | _AsyncStreamT: ...
+
+    async def post(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        files: RequestFiles | None = None,
+        options: RequestOptions = {},
+        stream: bool = False,
+        stream_cls: type[_AsyncStreamT] | None = None,
+    ) -> ResponseT | _AsyncStreamT:
+        opts = FinalRequestOptions.construct(
+            method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+        )
+        return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
+
+    async def patch(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+    ) -> ResponseT:
+        opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+        return await self.request(cast_to, opts)
+
+    async def put(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        files: RequestFiles | None = None,
+        options: RequestOptions = {},
+    ) -> ResponseT:
+        opts = FinalRequestOptions.construct(
+            method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+        )
+        return await self.request(cast_to, opts)
+
+    async def delete(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+    ) -> ResponseT:
+        opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+        return await self.request(cast_to, opts)
+
+    def get_api_list(
+        self,
+        path: str,
+        *,
+        model: Type[_T],
+        page: Type[AsyncPageT],
+        body: Body | None = None,
+        options: RequestOptions = {},
+        method: str = "get",
+    ) -> AsyncPaginator[_T, AsyncPageT]:
+        opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options)
+        return self._request_api_list(model, page, opts)
+
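+# --- editorial sketch (not part of the generated module) -------------------
+# The async client above is an async context manager: `__aenter__` returns the
+# client and `__aexit__` closes the underlying httpx.AsyncClient. A minimal
+# usage sketch, assuming a concrete subclass such as `AsyncOpenAI`:
+#
+#     import asyncio
+#     from openai import AsyncOpenAI
+#
+#     async def main() -> None:
+#         async with AsyncOpenAI() as client:
+#             models = await client.models.list()
+#             print([m.id for m in models.data])
+#
+#     asyncio.run(main())
+# ---------------------------------------------------------------------------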
+
+def make_request_options(
+    *,
+    query: Query | None = None,
+    extra_headers: Headers | None = None,
+    extra_query: Query | None = None,
+    extra_body: Body | None = None,
+    idempotency_key: str | None = None,
+    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    post_parser: PostParser | NotGiven = NOT_GIVEN,
+) -> RequestOptions:
+    """Create a dict of type RequestOptions without keys of NotGiven values."""
+    options: RequestOptions = {}
+    if extra_headers is not None:
+        options["headers"] = extra_headers
+
+    if extra_body is not None:
+        options["extra_json"] = cast(AnyMapping, extra_body)
+
+    if query is not None:
+        options["params"] = query
+
+    if extra_query is not None:
+        options["params"] = {**options.get("params", {}), **extra_query}
+
+    if not isinstance(timeout, NotGiven):
+        options["timeout"] = timeout
+
+    if idempotency_key is not None:
+        options["idempotency_key"] = idempotency_key
+
+    if is_given(post_parser):
+        # internal
+        options["post_parser"] = post_parser  # type: ignore
+
+    return options
+
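+# --- editorial sketch (not part of the generated module) -------------------
+# `make_request_options` only emits keys that were actually provided; anything
+# left as NOT_GIVEN is dropped. A small illustration:
+#
+#     opts = make_request_options(
+#         extra_headers={"X-Debug": "1"},
+#         extra_query={"limit": 10},
+#         timeout=30.0,
+#     )
+#     # -> {"headers": {"X-Debug": "1"}, "params": {"limit": 10}, "timeout": 30.0}
+# ---------------------------------------------------------------------------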
+
+class ForceMultipartDict(Dict[str, None]):
+    def __bool__(self) -> bool:
+        return True
+
+
+class OtherPlatform:
+    def __init__(self, name: str) -> None:
+        self.name = name
+
+    @override
+    def __str__(self) -> str:
+        return f"Other:{self.name}"
+
+
+Platform = Union[
+    OtherPlatform,
+    Literal[
+        "MacOS",
+        "Linux",
+        "Windows",
+        "FreeBSD",
+        "OpenBSD",
+        "iOS",
+        "Android",
+        "Unknown",
+    ],
+]
+
+
+def get_platform() -> Platform:
+    try:
+        system = platform.system().lower()
+        platform_name = platform.platform().lower()
+    except Exception:
+        return "Unknown"
+
+    if "iphone" in platform_name or "ipad" in platform_name:
+        # Tested using Python3IDE on an iPhone 11 and Pythonista on an iPad 7
+        # system is Darwin and platform_name is a string like:
+        # - Darwin-21.6.0-iPhone12,1-64bit
+        # - Darwin-21.6.0-iPad7,11-64bit
+        return "iOS"
+
+    if system == "darwin":
+        return "MacOS"
+
+    if system == "windows":
+        return "Windows"
+
+    if "android" in platform_name:
+        # Tested using Pydroid 3
+        # system is Linux and platform_name is a string like 'Linux-5.10.81-android12-9-00001-geba40aecb3b7-ab8534902-aarch64-with-libc'
+        return "Android"
+
+    if system == "linux":
+        # https://distro.readthedocs.io/en/latest/#distro.id
+        distro_id = distro.id()
+        if distro_id == "freebsd":
+            return "FreeBSD"
+
+        if distro_id == "openbsd":
+            return "OpenBSD"
+
+        return "Linux"
+
+    if platform_name:
+        return OtherPlatform(platform_name)
+
+    return "Unknown"
+
+
+@lru_cache(maxsize=None)
+def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]:
+    return {
+        "X-Stainless-Lang": "python",
+        "X-Stainless-Package-Version": version,
+        "X-Stainless-OS": str(platform or get_platform()),
+        "X-Stainless-Arch": str(get_architecture()),
+        "X-Stainless-Runtime": get_python_runtime(),
+        "X-Stainless-Runtime-Version": get_python_version(),
+    }
+
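+# --- editorial sketch (not part of the generated module) -------------------
+# `platform_headers` produces the X-Stainless telemetry headers attached to
+# every request, e.g. (values vary by machine):
+#
+#     platform_headers("1.0.0", platform=None)
+#     # -> {
+#     #     "X-Stainless-Lang": "python",
+#     #     "X-Stainless-Package-Version": "1.0.0",
+#     #     "X-Stainless-OS": "Linux",
+#     #     "X-Stainless-Arch": "x64",
+#     #     "X-Stainless-Runtime": "CPython",
+#     #     "X-Stainless-Runtime-Version": "3.12.0",
+#     # }
+# ---------------------------------------------------------------------------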
+
+class OtherArch:
+    def __init__(self, name: str) -> None:
+        self.name = name
+
+    @override
+    def __str__(self) -> str:
+        return f"other:{self.name}"
+
+
+Arch = Union[OtherArch, Literal["x32", "x64", "arm", "arm64", "unknown"]]
+
+
+def get_python_runtime() -> str:
+    try:
+        return platform.python_implementation()
+    except Exception:
+        return "unknown"
+
+
+def get_python_version() -> str:
+    try:
+        return platform.python_version()
+    except Exception:
+        return "unknown"
+
+
+def get_architecture() -> Arch:
+    try:
+        machine = platform.machine().lower()
+    except Exception:
+        return "unknown"
+
+    if machine in ("arm64", "aarch64"):
+        return "arm64"
+
+    # TODO: untested
+    if machine == "arm":
+        return "arm"
+
+    if machine == "x86_64":
+        return "x64"
+
+    # TODO: untested
+    if sys.maxsize <= 2**32:
+        return "x32"
+
+    if machine:
+        return OtherArch(machine)
+
+    return "unknown"
+
+
+def _merge_mappings(
+    obj1: Mapping[_T_co, Union[_T, Omit]],
+    obj2: Mapping[_T_co, Union[_T, Omit]],
+) -> Dict[_T_co, _T]:
+    """Merge two mappings of the same type, removing any values that are instances of `Omit`.
+
+    In cases with duplicate keys the second mapping takes precedence.
+    """
+    merged = {**obj1, **obj2}
+    return {key: value for key, value in merged.items() if not isinstance(value, Omit)}
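+# --- editorial sketch (not part of the generated module) -------------------
+# When mappings are merged, an `Omit()` value drops the key entirely instead
+# of sending an empty value (e.g. unsetting a default header):
+#
+#     defaults = {"OpenAI-Organization": "org-123", "X-Custom": "a"}
+#     overrides = {"OpenAI-Organization": Omit(), "X-Custom": "b"}
+#     _merge_mappings(defaults, overrides)
+#     # -> {"X-Custom": "b"}
+# ---------------------------------------------------------------------------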
diff --git a/.venv/lib/python3.12/site-packages/openai/_client.py b/.venv/lib/python3.12/site-packages/openai/_client.py
new file mode 100644
index 00000000..18d96da9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_client.py
@@ -0,0 +1,583 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, Union, Mapping
+from typing_extensions import Self, override
+
+import httpx
+
+from . import _exceptions
+from ._qs import Querystring
+from ._types import (
+    NOT_GIVEN,
+    Omit,
+    Timeout,
+    NotGiven,
+    Transport,
+    ProxiesTypes,
+    RequestOptions,
+)
+from ._utils import (
+    is_given,
+    is_mapping,
+    get_async_library,
+)
+from ._version import __version__
+from .resources import files, images, models, batches, embeddings, completions, moderations
+from ._streaming import Stream as Stream, AsyncStream as AsyncStream
+from ._exceptions import OpenAIError, APIStatusError
+from ._base_client import (
+    DEFAULT_MAX_RETRIES,
+    SyncAPIClient,
+    AsyncAPIClient,
+)
+from .resources.beta import beta
+from .resources.chat import chat
+from .resources.audio import audio
+from .resources.uploads import uploads
+from .resources.responses import responses
+from .resources.fine_tuning import fine_tuning
+from .resources.vector_stores import vector_stores
+
+__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"]
+
+
+class OpenAI(SyncAPIClient):
+    completions: completions.Completions
+    chat: chat.Chat
+    embeddings: embeddings.Embeddings
+    files: files.Files
+    images: images.Images
+    audio: audio.Audio
+    moderations: moderations.Moderations
+    models: models.Models
+    fine_tuning: fine_tuning.FineTuning
+    vector_stores: vector_stores.VectorStores
+    beta: beta.Beta
+    batches: batches.Batches
+    uploads: uploads.Uploads
+    responses: responses.Responses
+    with_raw_response: OpenAIWithRawResponse
+    with_streaming_response: OpenAIWithStreamedResponse
+
+    # client options
+    api_key: str
+    organization: str | None
+    project: str | None
+
+    websocket_base_url: str | httpx.URL | None
+    """Base URL for WebSocket connections.
+
+    If not specified, the default base URL will be used, with 'wss://' replacing the
+    'http://' or 'https://' scheme. For example: 'http://example.com' becomes
+    'wss://example.com'
+    """
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        base_url: str | httpx.URL | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        # Configure a custom httpx client.
+        # We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
+        # See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
+        http_client: httpx.Client | None = None,
+        # Enable or disable schema validation for data returned by the API.
+        # When enabled an error APIResponseValidationError is raised
+        # if the API responds with invalid data for the expected schema.
+        #
+        # This parameter may be removed or changed in the future.
+        # If you rely on this feature, please open a GitHub issue
+        # outlining your use-case to help us decide if it should be
+        # part of our public interface in the future.
+        _strict_response_validation: bool = False,
+    ) -> None:
+        """Construct a new synchronous OpenAI client instance.
+
+        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+        - `api_key` from `OPENAI_API_KEY`
+        - `organization` from `OPENAI_ORG_ID`
+        - `project` from `OPENAI_PROJECT_ID`
+        """
+        if api_key is None:
+            api_key = os.environ.get("OPENAI_API_KEY")
+        if api_key is None:
+            raise OpenAIError(
+                "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
+            )
+        self.api_key = api_key
+
+        if organization is None:
+            organization = os.environ.get("OPENAI_ORG_ID")
+        self.organization = organization
+
+        if project is None:
+            project = os.environ.get("OPENAI_PROJECT_ID")
+        self.project = project
+
+        self.websocket_base_url = websocket_base_url
+
+        if base_url is None:
+            base_url = os.environ.get("OPENAI_BASE_URL")
+        if base_url is None:
+            base_url = f"https://api.openai.com/v1"
+
+        super().__init__(
+            version=__version__,
+            base_url=base_url,
+            max_retries=max_retries,
+            timeout=timeout,
+            http_client=http_client,
+            custom_headers=default_headers,
+            custom_query=default_query,
+            _strict_response_validation=_strict_response_validation,
+        )
+
+        self._default_stream_cls = Stream
+
+        self.completions = completions.Completions(self)
+        self.chat = chat.Chat(self)
+        self.embeddings = embeddings.Embeddings(self)
+        self.files = files.Files(self)
+        self.images = images.Images(self)
+        self.audio = audio.Audio(self)
+        self.moderations = moderations.Moderations(self)
+        self.models = models.Models(self)
+        self.fine_tuning = fine_tuning.FineTuning(self)
+        self.vector_stores = vector_stores.VectorStores(self)
+        self.beta = beta.Beta(self)
+        self.batches = batches.Batches(self)
+        self.uploads = uploads.Uploads(self)
+        self.responses = responses.Responses(self)
+        self.with_raw_response = OpenAIWithRawResponse(self)
+        self.with_streaming_response = OpenAIWithStreamedResponse(self)
+
+    @property
+    @override
+    def qs(self) -> Querystring:
+        return Querystring(array_format="brackets")
+
+    @property
+    @override
+    def auth_headers(self) -> dict[str, str]:
+        api_key = self.api_key
+        return {"Authorization": f"Bearer {api_key}"}
+
+    @property
+    @override
+    def default_headers(self) -> dict[str, str | Omit]:
+        return {
+            **super().default_headers,
+            "X-Stainless-Async": "false",
+            "OpenAI-Organization": self.organization if self.organization is not None else Omit(),
+            "OpenAI-Project": self.project if self.project is not None else Omit(),
+            **self._custom_headers,
+        }
+
+    def copy(
+        self,
+        *,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        http_client: httpx.Client | None = None,
+        max_retries: int | NotGiven = NOT_GIVEN,
+        default_headers: Mapping[str, str] | None = None,
+        set_default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        set_default_query: Mapping[str, object] | None = None,
+        _extra_kwargs: Mapping[str, Any] = {},
+    ) -> Self:
+        """
+        Create a new client instance re-using the same options given to the current client with optional overriding.
+        """
+        if default_headers is not None and set_default_headers is not None:
+            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+
+        if default_query is not None and set_default_query is not None:
+            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+
+        headers = self._custom_headers
+        if default_headers is not None:
+            headers = {**headers, **default_headers}
+        elif set_default_headers is not None:
+            headers = set_default_headers
+
+        params = self._custom_query
+        if default_query is not None:
+            params = {**params, **default_query}
+        elif set_default_query is not None:
+            params = set_default_query
+
+        http_client = http_client or self._client
+        return self.__class__(
+            api_key=api_key or self.api_key,
+            organization=organization or self.organization,
+            project=project or self.project,
+            websocket_base_url=websocket_base_url or self.websocket_base_url,
+            base_url=base_url or self.base_url,
+            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
+            http_client=http_client,
+            max_retries=max_retries if is_given(max_retries) else self.max_retries,
+            default_headers=headers,
+            default_query=params,
+            **_extra_kwargs,
+        )
+
+    # Alias for `copy` for nicer inline usage, e.g.
+    # client.with_options(timeout=10).foo.create(...)
+    with_options = copy
+
+    @override
+    def _make_status_error(
+        self,
+        err_msg: str,
+        *,
+        body: object,
+        response: httpx.Response,
+    ) -> APIStatusError:
+        data = body.get("error", body) if is_mapping(body) else body
+        if response.status_code == 400:
+            return _exceptions.BadRequestError(err_msg, response=response, body=data)
+
+        if response.status_code == 401:
+            return _exceptions.AuthenticationError(err_msg, response=response, body=data)
+
+        if response.status_code == 403:
+            return _exceptions.PermissionDeniedError(err_msg, response=response, body=data)
+
+        if response.status_code == 404:
+            return _exceptions.NotFoundError(err_msg, response=response, body=data)
+
+        if response.status_code == 409:
+            return _exceptions.ConflictError(err_msg, response=response, body=data)
+
+        if response.status_code == 422:
+            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data)
+
+        if response.status_code == 429:
+            return _exceptions.RateLimitError(err_msg, response=response, body=data)
+
+        if response.status_code >= 500:
+            return _exceptions.InternalServerError(err_msg, response=response, body=data)
+        return APIStatusError(err_msg, response=response, body=data)
+
+
+class AsyncOpenAI(AsyncAPIClient):
+    completions: completions.AsyncCompletions
+    chat: chat.AsyncChat
+    embeddings: embeddings.AsyncEmbeddings
+    files: files.AsyncFiles
+    images: images.AsyncImages
+    audio: audio.AsyncAudio
+    moderations: moderations.AsyncModerations
+    models: models.AsyncModels
+    fine_tuning: fine_tuning.AsyncFineTuning
+    vector_stores: vector_stores.AsyncVectorStores
+    beta: beta.AsyncBeta
+    batches: batches.AsyncBatches
+    uploads: uploads.AsyncUploads
+    responses: responses.AsyncResponses
+    with_raw_response: AsyncOpenAIWithRawResponse
+    with_streaming_response: AsyncOpenAIWithStreamedResponse
+
+    # client options
+    api_key: str
+    organization: str | None
+    project: str | None
+
+    websocket_base_url: str | httpx.URL | None
+    """Base URL for WebSocket connections.
+
+    If not specified, the default base URL will be used, with 'wss://' replacing the
+    'http://' or 'https://' scheme. For example: 'http://example.com' becomes
+    'wss://example.com'
+    """
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        base_url: str | httpx.URL | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        # Configure a custom httpx client.
+        # We provide a `DefaultAsyncHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
+        # See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details.
+        http_client: httpx.AsyncClient | None = None,
+        # Enable or disable schema validation for data returned by the API.
+        # When enabled an error APIResponseValidationError is raised
+        # if the API responds with invalid data for the expected schema.
+        #
+        # This parameter may be removed or changed in the future.
+        # If you rely on this feature, please open a GitHub issue
+        # outlining your use-case to help us decide if it should be
+        # part of our public interface in the future.
+        _strict_response_validation: bool = False,
+    ) -> None:
+        """Construct a new async AsyncOpenAI client instance.
+
+        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+        - `api_key` from `OPENAI_API_KEY`
+        - `organization` from `OPENAI_ORG_ID`
+        - `project` from `OPENAI_PROJECT_ID`
+        """
+        if api_key is None:
+            api_key = os.environ.get("OPENAI_API_KEY")
+        if api_key is None:
+            raise OpenAIError(
+                "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
+            )
+        self.api_key = api_key
+
+        if organization is None:
+            organization = os.environ.get("OPENAI_ORG_ID")
+        self.organization = organization
+
+        if project is None:
+            project = os.environ.get("OPENAI_PROJECT_ID")
+        self.project = project
+
+        self.websocket_base_url = websocket_base_url
+
+        if base_url is None:
+            base_url = os.environ.get("OPENAI_BASE_URL")
+        if base_url is None:
+            base_url = f"https://api.openai.com/v1"
+
+        super().__init__(
+            version=__version__,
+            base_url=base_url,
+            max_retries=max_retries,
+            timeout=timeout,
+            http_client=http_client,
+            custom_headers=default_headers,
+            custom_query=default_query,
+            _strict_response_validation=_strict_response_validation,
+        )
+
+        self._default_stream_cls = AsyncStream
+
+        self.completions = completions.AsyncCompletions(self)
+        self.chat = chat.AsyncChat(self)
+        self.embeddings = embeddings.AsyncEmbeddings(self)
+        self.files = files.AsyncFiles(self)
+        self.images = images.AsyncImages(self)
+        self.audio = audio.AsyncAudio(self)
+        self.moderations = moderations.AsyncModerations(self)
+        self.models = models.AsyncModels(self)
+        self.fine_tuning = fine_tuning.AsyncFineTuning(self)
+        self.vector_stores = vector_stores.AsyncVectorStores(self)
+        self.beta = beta.AsyncBeta(self)
+        self.batches = batches.AsyncBatches(self)
+        self.uploads = uploads.AsyncUploads(self)
+        self.responses = responses.AsyncResponses(self)
+        self.with_raw_response = AsyncOpenAIWithRawResponse(self)
+        self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self)
+
+    @property
+    @override
+    def qs(self) -> Querystring:
+        return Querystring(array_format="brackets")
+
+    @property
+    @override
+    def auth_headers(self) -> dict[str, str]:
+        api_key = self.api_key
+        return {"Authorization": f"Bearer {api_key}"}
+
+    @property
+    @override
+    def default_headers(self) -> dict[str, str | Omit]:
+        return {
+            **super().default_headers,
+            "X-Stainless-Async": f"async:{get_async_library()}",
+            "OpenAI-Organization": self.organization if self.organization is not None else Omit(),
+            "OpenAI-Project": self.project if self.project is not None else Omit(),
+            **self._custom_headers,
+        }
+
+    def copy(
+        self,
+        *,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        http_client: httpx.AsyncClient | None = None,
+        max_retries: int | NotGiven = NOT_GIVEN,
+        default_headers: Mapping[str, str] | None = None,
+        set_default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        set_default_query: Mapping[str, object] | None = None,
+        _extra_kwargs: Mapping[str, Any] = {},
+    ) -> Self:
+        """
+        Create a new client instance re-using the same options given to the current client with optional overriding.
+        """
+        if default_headers is not None and set_default_headers is not None:
+            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+
+        if default_query is not None and set_default_query is not None:
+            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+
+        headers = self._custom_headers
+        if default_headers is not None:
+            headers = {**headers, **default_headers}
+        elif set_default_headers is not None:
+            headers = set_default_headers
+
+        params = self._custom_query
+        if default_query is not None:
+            params = {**params, **default_query}
+        elif set_default_query is not None:
+            params = set_default_query
+
+        http_client = http_client or self._client
+        return self.__class__(
+            api_key=api_key or self.api_key,
+            organization=organization or self.organization,
+            project=project or self.project,
+            websocket_base_url=websocket_base_url or self.websocket_base_url,
+            base_url=base_url or self.base_url,
+            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
+            http_client=http_client,
+            max_retries=max_retries if is_given(max_retries) else self.max_retries,
+            default_headers=headers,
+            default_query=params,
+            **_extra_kwargs,
+        )
+
+    # Alias for `copy` for nicer inline usage, e.g.
+    # client.with_options(timeout=10).foo.create(...)
+    with_options = copy
+
+    @override
+    def _make_status_error(
+        self,
+        err_msg: str,
+        *,
+        body: object,
+        response: httpx.Response,
+    ) -> APIStatusError:
+        data = body.get("error", body) if is_mapping(body) else body
+        if response.status_code == 400:
+            return _exceptions.BadRequestError(err_msg, response=response, body=data)
+
+        if response.status_code == 401:
+            return _exceptions.AuthenticationError(err_msg, response=response, body=data)
+
+        if response.status_code == 403:
+            return _exceptions.PermissionDeniedError(err_msg, response=response, body=data)
+
+        if response.status_code == 404:
+            return _exceptions.NotFoundError(err_msg, response=response, body=data)
+
+        if response.status_code == 409:
+            return _exceptions.ConflictError(err_msg, response=response, body=data)
+
+        if response.status_code == 422:
+            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data)
+
+        if response.status_code == 429:
+            return _exceptions.RateLimitError(err_msg, response=response, body=data)
+
+        if response.status_code >= 500:
+            return _exceptions.InternalServerError(err_msg, response=response, body=data)
+        return APIStatusError(err_msg, response=response, body=data)
+
+
+class OpenAIWithRawResponse:
+    def __init__(self, client: OpenAI) -> None:
+        self.completions = completions.CompletionsWithRawResponse(client.completions)
+        self.chat = chat.ChatWithRawResponse(client.chat)
+        self.embeddings = embeddings.EmbeddingsWithRawResponse(client.embeddings)
+        self.files = files.FilesWithRawResponse(client.files)
+        self.images = images.ImagesWithRawResponse(client.images)
+        self.audio = audio.AudioWithRawResponse(client.audio)
+        self.moderations = moderations.ModerationsWithRawResponse(client.moderations)
+        self.models = models.ModelsWithRawResponse(client.models)
+        self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.VectorStoresWithRawResponse(client.vector_stores)
+        self.beta = beta.BetaWithRawResponse(client.beta)
+        self.batches = batches.BatchesWithRawResponse(client.batches)
+        self.uploads = uploads.UploadsWithRawResponse(client.uploads)
+        self.responses = responses.ResponsesWithRawResponse(client.responses)
+
+
+class AsyncOpenAIWithRawResponse:
+    def __init__(self, client: AsyncOpenAI) -> None:
+        self.completions = completions.AsyncCompletionsWithRawResponse(client.completions)
+        self.chat = chat.AsyncChatWithRawResponse(client.chat)
+        self.embeddings = embeddings.AsyncEmbeddingsWithRawResponse(client.embeddings)
+        self.files = files.AsyncFilesWithRawResponse(client.files)
+        self.images = images.AsyncImagesWithRawResponse(client.images)
+        self.audio = audio.AsyncAudioWithRawResponse(client.audio)
+        self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations)
+        self.models = models.AsyncModelsWithRawResponse(client.models)
+        self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.AsyncVectorStoresWithRawResponse(client.vector_stores)
+        self.beta = beta.AsyncBetaWithRawResponse(client.beta)
+        self.batches = batches.AsyncBatchesWithRawResponse(client.batches)
+        self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads)
+        self.responses = responses.AsyncResponsesWithRawResponse(client.responses)
+
+
+class OpenAIWithStreamedResponse:
+    def __init__(self, client: OpenAI) -> None:
+        self.completions = completions.CompletionsWithStreamingResponse(client.completions)
+        self.chat = chat.ChatWithStreamingResponse(client.chat)
+        self.embeddings = embeddings.EmbeddingsWithStreamingResponse(client.embeddings)
+        self.files = files.FilesWithStreamingResponse(client.files)
+        self.images = images.ImagesWithStreamingResponse(client.images)
+        self.audio = audio.AudioWithStreamingResponse(client.audio)
+        self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations)
+        self.models = models.ModelsWithStreamingResponse(client.models)
+        self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.VectorStoresWithStreamingResponse(client.vector_stores)
+        self.beta = beta.BetaWithStreamingResponse(client.beta)
+        self.batches = batches.BatchesWithStreamingResponse(client.batches)
+        self.uploads = uploads.UploadsWithStreamingResponse(client.uploads)
+        self.responses = responses.ResponsesWithStreamingResponse(client.responses)
+
+
+class AsyncOpenAIWithStreamedResponse:
+    def __init__(self, client: AsyncOpenAI) -> None:
+        self.completions = completions.AsyncCompletionsWithStreamingResponse(client.completions)
+        self.chat = chat.AsyncChatWithStreamingResponse(client.chat)
+        self.embeddings = embeddings.AsyncEmbeddingsWithStreamingResponse(client.embeddings)
+        self.files = files.AsyncFilesWithStreamingResponse(client.files)
+        self.images = images.AsyncImagesWithStreamingResponse(client.images)
+        self.audio = audio.AsyncAudioWithStreamingResponse(client.audio)
+        self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations)
+        self.models = models.AsyncModelsWithStreamingResponse(client.models)
+        self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning)
+        self.vector_stores = vector_stores.AsyncVectorStoresWithStreamingResponse(client.vector_stores)
+        self.beta = beta.AsyncBetaWithStreamingResponse(client.beta)
+        self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches)
+        self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads)
+        self.responses = responses.AsyncResponsesWithStreamingResponse(client.responses)
+
+
+Client = OpenAI
+
+AsyncClient = AsyncOpenAI
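+# --- editorial sketch (not part of the generated module) -------------------
+# Typical construction and per-call option overrides, assuming OPENAI_API_KEY
+# is set in the environment:
+#
+#     from openai import OpenAI
+#
+#     client = OpenAI()  # api_key/organization/project read from env vars
+#     fast = client.with_options(timeout=10.0, max_retries=0)
+#     models = fast.models.list()
+# ---------------------------------------------------------------------------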
diff --git a/.venv/lib/python3.12/site-packages/openai/_compat.py b/.venv/lib/python3.12/site-packages/openai/_compat.py
new file mode 100644
index 00000000..87fc3707
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_compat.py
@@ -0,0 +1,231 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
+from datetime import date, datetime
+from typing_extensions import Self, Literal
+
+import pydantic
+from pydantic.fields import FieldInfo
+
+from ._types import IncEx, StrBytesIntFloat
+
+_T = TypeVar("_T")
+_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
+
+# --------------- Pydantic v2 compatibility ---------------
+
+# Pyright incorrectly reports some of our functions as overriding a method when they don't
+# pyright: reportIncompatibleMethodOverride=false
+
+PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
+
+# v1 re-exports
+if TYPE_CHECKING:
+
+    def parse_date(value: date | StrBytesIntFloat) -> date:  # noqa: ARG001
+        ...
+
+    def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:  # noqa: ARG001
+        ...
+
+    def get_args(t: type[Any]) -> tuple[Any, ...]:  # noqa: ARG001
+        ...
+
+    def is_union(tp: type[Any] | None) -> bool:  # noqa: ARG001
+        ...
+
+    def get_origin(t: type[Any]) -> type[Any] | None:  # noqa: ARG001
+        ...
+
+    def is_literal_type(type_: type[Any]) -> bool:  # noqa: ARG001
+        ...
+
+    def is_typeddict(type_: type[Any]) -> bool:  # noqa: ARG001
+        ...
+
+else:
+    if PYDANTIC_V2:
+        from pydantic.v1.typing import (
+            get_args as get_args,
+            is_union as is_union,
+            get_origin as get_origin,
+            is_typeddict as is_typeddict,
+            is_literal_type as is_literal_type,
+        )
+        from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
+    else:
+        from pydantic.typing import (
+            get_args as get_args,
+            is_union as is_union,
+            get_origin as get_origin,
+            is_typeddict as is_typeddict,
+            is_literal_type as is_literal_type,
+        )
+        from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
+
+
+# refactored config
+if TYPE_CHECKING:
+    from pydantic import ConfigDict as ConfigDict
+else:
+    if PYDANTIC_V2:
+        from pydantic import ConfigDict
+    else:
+        # TODO: provide an error message here?
+        ConfigDict = None
+
+
+# renamed methods / properties
+def parse_obj(model: type[_ModelT], value: object) -> _ModelT:
+    if PYDANTIC_V2:
+        return model.model_validate(value)
+    else:
+        return cast(_ModelT, model.parse_obj(value))  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+
+
+def field_is_required(field: FieldInfo) -> bool:
+    if PYDANTIC_V2:
+        return field.is_required()
+    return field.required  # type: ignore
+
+
+def field_get_default(field: FieldInfo) -> Any:
+    value = field.get_default()
+    if PYDANTIC_V2:
+        from pydantic_core import PydanticUndefined
+
+        if value == PydanticUndefined:
+            return None
+        return value
+    return value
+
+
+def field_outer_type(field: FieldInfo) -> Any:
+    if PYDANTIC_V2:
+        return field.annotation
+    return field.outer_type_  # type: ignore
+
+
+def get_model_config(model: type[pydantic.BaseModel]) -> Any:
+    if PYDANTIC_V2:
+        return model.model_config
+    return model.__config__  # type: ignore
+
+
+def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
+    if PYDANTIC_V2:
+        return model.model_fields
+    return model.__fields__  # type: ignore
+
+
+def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
+    if PYDANTIC_V2:
+        return model.model_copy(deep=deep)
+    return model.copy(deep=deep)  # type: ignore
+
+
+def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
+    if PYDANTIC_V2:
+        return model.model_dump_json(indent=indent)
+    return model.json(indent=indent)  # type: ignore
+
+
+def model_dump(
+    model: pydantic.BaseModel,
+    *,
+    exclude: IncEx | None = None,
+    exclude_unset: bool = False,
+    exclude_defaults: bool = False,
+    warnings: bool = True,
+    mode: Literal["json", "python"] = "python",
+) -> dict[str, Any]:
+    if PYDANTIC_V2 or hasattr(model, "model_dump"):
+        return model.model_dump(
+            mode=mode,
+            exclude=exclude,
+            exclude_unset=exclude_unset,
+            exclude_defaults=exclude_defaults,
+            # warnings are not supported in Pydantic v1
+            warnings=warnings if PYDANTIC_V2 else True,
+        )
+    return cast(
+        "dict[str, Any]",
+        model.dict(  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+            exclude=exclude,
+            exclude_unset=exclude_unset,
+            exclude_defaults=exclude_defaults,
+        ),
+    )
+
+
+def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
+    if PYDANTIC_V2:
+        return model.model_validate(data)
+    return model.parse_obj(data)  # pyright: ignore[reportDeprecated]
+
+
+def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT:
+    if PYDANTIC_V2:
+        return model.model_validate_json(data)
+    return model.parse_raw(data)  # pyright: ignore[reportDeprecated]
+
+
+def model_json_schema(model: type[_ModelT]) -> dict[str, Any]:
+    if PYDANTIC_V2:
+        return model.model_json_schema()
+    return model.schema()  # pyright: ignore[reportDeprecated]
+
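+# --- editorial sketch (not part of the generated module) -------------------
+# These shims give one code path for both Pydantic v1 and v2, e.g.:
+#
+#     import pydantic
+#
+#     class User(pydantic.BaseModel):
+#         name: str
+#
+#     user = model_parse(User, {"name": "ada"})   # model_validate / parse_obj
+#     payload = model_dump(user, mode="json")     # model_dump / dict
+#     text = model_json(user, indent=2)           # model_dump_json / json
+# ---------------------------------------------------------------------------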
+
+# generic models
+if TYPE_CHECKING:
+
+    class GenericModel(pydantic.BaseModel): ...
+
+else:
+    if PYDANTIC_V2:
+        # there no longer needs to be a distinction in v2 but
+        # we still have to create our own subclass to avoid
+        # inconsistent MRO ordering errors
+        class GenericModel(pydantic.BaseModel): ...
+
+    else:
+        import pydantic.generics
+
+        class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
+
+
+# cached properties
+if TYPE_CHECKING:
+    cached_property = property
+
+    # we define a separate type (copied from typeshed)
+    # that represents that `cached_property` is `set`able
+    # at runtime, which differs from `@property`.
+    #
+    # this is a separate type as editors likely special case
+    # `@property` and we don't want to cause issues just to have
+    # more helpful internal types.
+
+    class typed_cached_property(Generic[_T]):
+        func: Callable[[Any], _T]
+        attrname: str | None
+
+        def __init__(self, func: Callable[[Any], _T]) -> None: ...
+
+        @overload
+        def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
+
+        @overload
+        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
+
+        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
+            raise NotImplementedError()
+
+        def __set_name__(self, owner: type[Any], name: str) -> None: ...
+
+        # __set__ is not defined at runtime, but @cached_property is designed to be settable
+        def __set__(self, instance: object, value: _T) -> None: ...
+else:
+    from functools import cached_property as cached_property
+
+    typed_cached_property = cached_property
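+# --- editorial sketch (not part of the generated module) -------------------
+# `typed_cached_property` behaves like functools.cached_property at runtime
+# but is typed as settable, so a computed value can be overridden:
+#
+#     class Box:
+#         @typed_cached_property
+#         def value(self) -> int:
+#             return 42
+#
+#     box = Box()
+#     box.value          # computed once -> 42
+#     box.value = 7      # allowed; a plain @property would raise
+# ---------------------------------------------------------------------------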
diff --git a/.venv/lib/python3.12/site-packages/openai/_constants.py b/.venv/lib/python3.12/site-packages/openai/_constants.py
new file mode 100644
index 00000000..7029dc72
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_constants.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import httpx
+
+RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response"
+OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to"
+
+# default timeout is 10 minutes
+DEFAULT_TIMEOUT = httpx.Timeout(timeout=600, connect=5.0)
+DEFAULT_MAX_RETRIES = 2
+DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=100)
+
+INITIAL_RETRY_DELAY = 0.5
+MAX_RETRY_DELAY = 8.0
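+# --- editorial sketch (not part of the generated module) -------------------
+# These constants bound the retry sleep computed by `_calculate_retry_timeout`
+# in the base client (which also consults the response headers passed to it).
+# A rough illustration of capped exponential backoff using them:
+#
+#     def sketch_backoff(retries_remaining: int, max_retries: int = DEFAULT_MAX_RETRIES) -> float:
+#         attempt = max_retries - retries_remaining
+#         return min(INITIAL_RETRY_DELAY * 2**attempt, MAX_RETRY_DELAY)
+# ---------------------------------------------------------------------------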
diff --git a/.venv/lib/python3.12/site-packages/openai/_exceptions.py b/.venv/lib/python3.12/site-packages/openai/_exceptions.py
new file mode 100644
index 00000000..e326ed95
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_exceptions.py
@@ -0,0 +1,156 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Optional, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ._utils import is_dict
+from ._models import construct_type
+
+if TYPE_CHECKING:
+    from .types.chat import ChatCompletion
+
+__all__ = [
+    "BadRequestError",
+    "AuthenticationError",
+    "PermissionDeniedError",
+    "NotFoundError",
+    "ConflictError",
+    "UnprocessableEntityError",
+    "RateLimitError",
+    "InternalServerError",
+    "LengthFinishReasonError",
+    "ContentFilterFinishReasonError",
+]
+
+
+class OpenAIError(Exception):
+    pass
+
+
+class APIError(OpenAIError):
+    message: str
+    request: httpx.Request
+
+    body: object | None
+    """The API response body.
+
+    If the API responded with a valid JSON structure then this property will be the
+    decoded result.
+
+    If it isn't a valid JSON structure then this will be the raw response.
+
+    If there was no response associated with this error then it will be `None`.
+    """
+
+    code: Optional[str] = None
+    param: Optional[str] = None
+    type: Optional[str]
+
+    def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None:
+        super().__init__(message)
+        self.request = request
+        self.message = message
+        self.body = body
+
+        if is_dict(body):
+            self.code = cast(Any, construct_type(type_=Optional[str], value=body.get("code")))
+            self.param = cast(Any, construct_type(type_=Optional[str], value=body.get("param")))
+            self.type = cast(Any, construct_type(type_=str, value=body.get("type")))
+        else:
+            self.code = None
+            self.param = None
+            self.type = None
+
+
+class APIResponseValidationError(APIError):
+    response: httpx.Response
+    status_code: int
+
+    def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None:
+        super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body)
+        self.response = response
+        self.status_code = response.status_code
+
+
+class APIStatusError(APIError):
+    """Raised when an API response has a status code of 4xx or 5xx."""
+
+    response: httpx.Response
+    status_code: int
+    request_id: str | None
+
+    def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
+        super().__init__(message, response.request, body=body)
+        self.response = response
+        self.status_code = response.status_code
+        self.request_id = response.headers.get("x-request-id")
+
+
+class APIConnectionError(APIError):
+    def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None:
+        super().__init__(message, request, body=None)
+
+
+class APITimeoutError(APIConnectionError):
+    def __init__(self, request: httpx.Request) -> None:
+        super().__init__(message="Request timed out.", request=request)
+
+
+class BadRequestError(APIStatusError):
+    status_code: Literal[400] = 400  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class AuthenticationError(APIStatusError):
+    status_code: Literal[401] = 401  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class PermissionDeniedError(APIStatusError):
+    status_code: Literal[403] = 403  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class NotFoundError(APIStatusError):
+    status_code: Literal[404] = 404  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class ConflictError(APIStatusError):
+    status_code: Literal[409] = 409  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class UnprocessableEntityError(APIStatusError):
+    status_code: Literal[422] = 422  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class RateLimitError(APIStatusError):
+    status_code: Literal[429] = 429  # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class InternalServerError(APIStatusError):
+    pass
+
+
+class LengthFinishReasonError(OpenAIError):
+    completion: ChatCompletion
+    """The completion that caused this error.
+
+    Note: this will *not* be a complete `ChatCompletion` object when streaming as `usage`
+          will not be included.
+    """
+
+    def __init__(self, *, completion: ChatCompletion) -> None:
+        msg = "Could not parse response content as the length limit was reached"
+        if completion.usage:
+            msg += f" - {completion.usage}"
+
+        super().__init__(msg)
+        self.completion = completion
+
+
+class ContentFilterFinishReasonError(OpenAIError):
+    def __init__(self) -> None:
+        super().__init__(
+            "Could not parse response content as the request was rejected by the content filter",
+        )
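+
+
+# Editorial sketch (not part of the upstream module): typical handling of the
+# exception hierarchy above, assuming a configured `openai` client:
+#
+#     try:
+#         client.chat.completions.create(model="gpt-4o-mini", messages=[...])
+#     except openai.RateLimitError as err:    # status_code == 429
+#         print(err.request_id, err.body)
+#     except openai.APIStatusError as err:    # any other 4xx/5xx response
+#         print(err.status_code, err.message)
+#     except openai.APIConnectionError:
+#         ...  # network failure, including APITimeoutError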
diff --git a/.venv/lib/python3.12/site-packages/openai/_extras/__init__.py b/.venv/lib/python3.12/site-packages/openai/_extras/__init__.py
new file mode 100644
index 00000000..692de248
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_extras/__init__.py
@@ -0,0 +1,3 @@
+from .numpy_proxy import numpy as numpy, has_numpy as has_numpy
+from .pandas_proxy import pandas as pandas
+from .sounddevice_proxy import sounddevice as sounddevice
diff --git a/.venv/lib/python3.12/site-packages/openai/_extras/_common.py b/.venv/lib/python3.12/site-packages/openai/_extras/_common.py
new file mode 100644
index 00000000..6e71720e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_extras/_common.py
@@ -0,0 +1,21 @@
+from .._exceptions import OpenAIError
+
+INSTRUCTIONS = """
+
+OpenAI error:
+
+    missing `{library}`
+
+This feature requires additional dependencies:
+
+    $ pip install openai[{extra}]
+
+"""
+
+
+def format_instructions(*, library: str, extra: str) -> str:
+    return INSTRUCTIONS.format(library=library, extra=extra)
+
+
+class MissingDependencyError(OpenAIError):
+    pass
diff --git a/.venv/lib/python3.12/site-packages/openai/_extras/numpy_proxy.py b/.venv/lib/python3.12/site-packages/openai/_extras/numpy_proxy.py
new file mode 100644
index 00000000..2b066957
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_extras/numpy_proxy.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+from typing_extensions import override
+
+from .._utils import LazyProxy
+from ._common import MissingDependencyError, format_instructions
+
+if TYPE_CHECKING:
+    import numpy as numpy
+
+
+NUMPY_INSTRUCTIONS = format_instructions(library="numpy", extra="voice_helpers")
+
+
+class NumpyProxy(LazyProxy[Any]):
+    @override
+    def __load__(self) -> Any:
+        try:
+            import numpy
+        except ImportError as err:
+            raise MissingDependencyError(NUMPY_INSTRUCTIONS) from err
+
+        return numpy
+
+
+if not TYPE_CHECKING:
+    numpy = NumpyProxy()
+
+
+def has_numpy() -> bool:
+    try:
+        import numpy  # noqa: F401  # pyright: ignore[reportUnusedImport]
+    except ImportError:
+        return False
+
+    return True
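+
+
+# Editorial sketch (not part of the upstream module): `numpy` above is a LazyProxy,
+# so the real import is deferred until first attribute access:
+#
+#     from openai._extras import numpy as np, has_numpy
+#
+#     if has_numpy():
+#         arr = np.zeros(3)  # numpy is imported here, on first use
+#     # without numpy installed, attribute access raises MissingDependencyError
+#     # pointing at `pip install openai[voice_helpers]`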
diff --git a/.venv/lib/python3.12/site-packages/openai/_extras/pandas_proxy.py b/.venv/lib/python3.12/site-packages/openai/_extras/pandas_proxy.py
new file mode 100644
index 00000000..686377ba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_extras/pandas_proxy.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+from typing_extensions import override
+
+from .._utils import LazyProxy
+from ._common import MissingDependencyError, format_instructions
+
+if TYPE_CHECKING:
+    import pandas as pandas
+
+
+PANDAS_INSTRUCTIONS = format_instructions(library="pandas", extra="datalib")
+
+
+class PandasProxy(LazyProxy[Any]):
+    @override
+    def __load__(self) -> Any:
+        try:
+            import pandas
+        except ImportError as err:
+            raise MissingDependencyError(PANDAS_INSTRUCTIONS) from err
+
+        return pandas
+
+
+if not TYPE_CHECKING:
+    pandas = PandasProxy()
diff --git a/.venv/lib/python3.12/site-packages/openai/_extras/sounddevice_proxy.py b/.venv/lib/python3.12/site-packages/openai/_extras/sounddevice_proxy.py
new file mode 100644
index 00000000..482d4c68
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_extras/sounddevice_proxy.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+from typing_extensions import override
+
+from .._utils import LazyProxy
+from ._common import MissingDependencyError, format_instructions
+
+if TYPE_CHECKING:
+    import sounddevice as sounddevice  # type: ignore
+
+
+SOUNDDEVICE_INSTRUCTIONS = format_instructions(library="sounddevice", extra="voice_helpers")
+
+
+class SounddeviceProxy(LazyProxy[Any]):
+    @override
+    def __load__(self) -> Any:
+        try:
+            import sounddevice  # type: ignore
+        except ImportError as err:
+            raise MissingDependencyError(SOUNDDEVICE_INSTRUCTIONS) from err
+
+        return sounddevice
+
+
+if not TYPE_CHECKING:
+    sounddevice = SounddeviceProxy()
diff --git a/.venv/lib/python3.12/site-packages/openai/_files.py b/.venv/lib/python3.12/site-packages/openai/_files.py
new file mode 100644
index 00000000..801a0d29
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_files.py
@@ -0,0 +1,123 @@
+from __future__ import annotations
+
+import io
+import os
+import pathlib
+from typing import overload
+from typing_extensions import TypeGuard
+
+import anyio
+
+from ._types import (
+    FileTypes,
+    FileContent,
+    RequestFiles,
+    HttpxFileTypes,
+    Base64FileInput,
+    HttpxFileContent,
+    HttpxRequestFiles,
+)
+from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
+
+
+def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
+    return isinstance(obj, io.IOBase) or isinstance(obj, os.PathLike)
+
+
+def is_file_content(obj: object) -> TypeGuard[FileContent]:
+    return (
+        isinstance(obj, bytes) or isinstance(obj, tuple) or isinstance(obj, io.IOBase) or isinstance(obj, os.PathLike)
+    )
+
+
+def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
+    if not is_file_content(obj):
+        prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
+        raise RuntimeError(
+            f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/openai/openai-python/tree/main#file-uploads"
+        ) from None
+
+
+@overload
+def to_httpx_files(files: None) -> None: ...
+
+
+@overload
+def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
+
+
+def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
+    if files is None:
+        return None
+
+    if is_mapping_t(files):
+        files = {key: _transform_file(file) for key, file in files.items()}
+    elif is_sequence_t(files):
+        files = [(key, _transform_file(file)) for key, file in files]
+    else:
+        raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")
+
+    return files
+
+
+def _transform_file(file: FileTypes) -> HttpxFileTypes:
+    if is_file_content(file):
+        if isinstance(file, os.PathLike):
+            path = pathlib.Path(file)
+            return (path.name, path.read_bytes())
+
+        return file
+
+    if is_tuple_t(file):
+        return (file[0], _read_file_content(file[1]), *file[2:])
+
+    raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
+
+
+def _read_file_content(file: FileContent) -> HttpxFileContent:
+    if isinstance(file, os.PathLike):
+        return pathlib.Path(file).read_bytes()
+    return file
+
+
+@overload
+async def async_to_httpx_files(files: None) -> None: ...
+
+
+@overload
+async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
+
+
+async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
+    if files is None:
+        return None
+
+    if is_mapping_t(files):
+        files = {key: await _async_transform_file(file) for key, file in files.items()}
+    elif is_sequence_t(files):
+        files = [(key, await _async_transform_file(file)) for key, file in files]
+    else:
+        raise TypeError("Unexpected file type input {type(files)}, expected mapping or sequence")
+
+    return files
+
+
+async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
+    if is_file_content(file):
+        if isinstance(file, os.PathLike):
+            path = anyio.Path(file)
+            return (path.name, await path.read_bytes())
+
+        return file
+
+    if is_tuple_t(file):
+        return (file[0], await _async_read_file_content(file[1]), *file[2:])
+
+    raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
+
+
+async def _async_read_file_content(file: FileContent) -> HttpxFileContent:
+    if isinstance(file, os.PathLike):
+        return await anyio.Path(file).read_bytes()
+
+    return file
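+
+
+# Editorial sketch (not part of the upstream module): converting user-supplied
+# files into the shape httpx expects, assuming "report.pdf" exists locally:
+#
+#     files = to_httpx_files({"file": pathlib.Path("report.pdf")})
+#     # -> {"file": ("report.pdf", b"<pdf bytes>")}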
diff --git a/.venv/lib/python3.12/site-packages/openai/_legacy_response.py b/.venv/lib/python3.12/site-packages/openai/_legacy_response.py
new file mode 100644
index 00000000..8880e5f1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_legacy_response.py
@@ -0,0 +1,488 @@
+from __future__ import annotations
+
+import os
+import inspect
+import logging
+import datetime
+import functools
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Union,
+    Generic,
+    TypeVar,
+    Callable,
+    Iterator,
+    AsyncIterator,
+    cast,
+    overload,
+)
+from typing_extensions import Awaitable, ParamSpec, override, deprecated, get_origin
+
+import anyio
+import httpx
+import pydantic
+
+from ._types import NoneType
+from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type
+from ._models import BaseModel, is_basemodel, add_request_id
+from ._constants import RAW_RESPONSE_HEADER
+from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
+from ._exceptions import APIResponseValidationError
+
+if TYPE_CHECKING:
+    from ._models import FinalRequestOptions
+    from ._base_client import BaseClient
+
+
+P = ParamSpec("P")
+R = TypeVar("R")
+_T = TypeVar("_T")
+
+log: logging.Logger = logging.getLogger(__name__)
+
+
+class LegacyAPIResponse(Generic[R]):
+    """This is a legacy class as it will be replaced by `APIResponse`
+    and `AsyncAPIResponse` in the `_response.py` file in the next major
+    release.
+
+    For the sync client this will mostly be the same, with the exception
+    that `content` & `text` will be methods instead of properties. In the
+    async client, all methods will be async.
+
+    A migration script will be provided & the migration in general should
+    be smooth.
+    """
+
+    _cast_to: type[R]
+    _client: BaseClient[Any, Any]
+    _parsed_by_type: dict[type[Any], Any]
+    _stream: bool
+    _stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
+    _options: FinalRequestOptions
+
+    http_response: httpx.Response
+
+    retries_taken: int
+    """The number of retries made. If no retries happened this will be `0`"""
+
+    def __init__(
+        self,
+        *,
+        raw: httpx.Response,
+        cast_to: type[R],
+        client: BaseClient[Any, Any],
+        stream: bool,
+        stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
+        options: FinalRequestOptions,
+        retries_taken: int = 0,
+    ) -> None:
+        self._cast_to = cast_to
+        self._client = client
+        self._parsed_by_type = {}
+        self._stream = stream
+        self._stream_cls = stream_cls
+        self._options = options
+        self.http_response = raw
+        self.retries_taken = retries_taken
+
+    @property
+    def request_id(self) -> str | None:
+        return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
+
+    @overload
+    def parse(self, *, to: type[_T]) -> _T: ...
+
+    @overload
+    def parse(self) -> R: ...
+
+    def parse(self, *, to: type[_T] | None = None) -> R | _T:
+        """Returns the rich python representation of this response's data.
+
+        NOTE: For the async client: this will become a coroutine in the next major version.
+
+        For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
+
+        You can customise the type that the response is parsed into through
+        the `to` argument, e.g.
+
+        ```py
+        from openai import BaseModel
+
+
+        class MyModel(BaseModel):
+            foo: str
+
+
+        obj = response.parse(to=MyModel)
+        print(obj.foo)
+        ```
+
+        We support parsing:
+          - `BaseModel`
+          - `dict`
+          - `list`
+          - `Union`
+          - `str`
+          - `int`
+          - `float`
+          - `httpx.Response`
+        """
+        cache_key = to if to is not None else self._cast_to
+        cached = self._parsed_by_type.get(cache_key)
+        if cached is not None:
+            return cached  # type: ignore[no-any-return]
+
+        parsed = self._parse(to=to)
+        if is_given(self._options.post_parser):
+            parsed = self._options.post_parser(parsed)
+
+        if isinstance(parsed, BaseModel):
+            add_request_id(parsed, self.request_id)
+
+        self._parsed_by_type[cache_key] = parsed
+        return cast(R, parsed)
+
+    @property
+    def headers(self) -> httpx.Headers:
+        return self.http_response.headers
+
+    @property
+    def http_request(self) -> httpx.Request:
+        return self.http_response.request
+
+    @property
+    def status_code(self) -> int:
+        return self.http_response.status_code
+
+    @property
+    def url(self) -> httpx.URL:
+        return self.http_response.url
+
+    @property
+    def method(self) -> str:
+        return self.http_request.method
+
+    @property
+    def content(self) -> bytes:
+        """Return the binary response content.
+
+        NOTE: this will be removed in favour of `.read()` in the
+        next major version.
+        """
+        return self.http_response.content
+
+    @property
+    def text(self) -> str:
+        """Return the decoded response content.
+
+        NOTE: this will be turned into a method in the next major version.
+        """
+        return self.http_response.text
+
+    @property
+    def http_version(self) -> str:
+        return self.http_response.http_version
+
+    @property
+    def is_closed(self) -> bool:
+        return self.http_response.is_closed
+
+    @property
+    def elapsed(self) -> datetime.timedelta:
+        """The time taken for the complete request/response cycle to complete."""
+        return self.http_response.elapsed
+
+    def _parse(self, *, to: type[_T] | None = None) -> R | _T:
+        cast_to = to if to is not None else self._cast_to
+
+        # unwrap `TypeAlias('Name', T)` -> `T`
+        if is_type_alias_type(cast_to):
+            cast_to = cast_to.__value__  # type: ignore[unreachable]
+
+        # unwrap `Annotated[T, ...]` -> `T`
+        if cast_to and is_annotated_type(cast_to):
+            cast_to = extract_type_arg(cast_to, 0)
+
+        origin = get_origin(cast_to) or cast_to
+
+        if self._stream:
+            if to:
+                if not is_stream_class_type(to):
+                    raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")
+
+                return cast(
+                    _T,
+                    to(
+                        cast_to=extract_stream_chunk_type(
+                            to,
+                            failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
+                        ),
+                        response=self.http_response,
+                        client=cast(Any, self._client),
+                    ),
+                )
+
+            if self._stream_cls:
+                return cast(
+                    R,
+                    self._stream_cls(
+                        cast_to=extract_stream_chunk_type(self._stream_cls),
+                        response=self.http_response,
+                        client=cast(Any, self._client),
+                    ),
+                )
+
+            stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
+            if stream_cls is None:
+                raise MissingStreamClassError()
+
+            return cast(
+                R,
+                stream_cls(
+                    cast_to=cast_to,
+                    response=self.http_response,
+                    client=cast(Any, self._client),
+                ),
+            )
+
+        if cast_to is NoneType:
+            return cast(R, None)
+
+        response = self.http_response
+        if cast_to == str:
+            return cast(R, response.text)
+
+        if cast_to == int:
+            return cast(R, int(response.text))
+
+        if cast_to == float:
+            return cast(R, float(response.text))
+
+        if cast_to == bool:
+            return cast(R, response.text.lower() == "true")
+
+        if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent):
+            return cast(R, cast_to(response))  # type: ignore
+
+        if origin == LegacyAPIResponse:
+            raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
+
+        if inspect.isclass(
+            origin  # pyright: ignore[reportUnknownArgumentType]
+        ) and issubclass(origin, httpx.Response):
+            # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
+            # and pass that class to our request functions. We cannot change the variance to be either
+            # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
+            # the response class ourselves but that is something that should be supported directly in httpx
+            # as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
+            if cast_to != httpx.Response:
+                raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
+            return cast(R, response)
+
+        if (
+            inspect.isclass(
+                origin  # pyright: ignore[reportUnknownArgumentType]
+            )
+            and not issubclass(origin, BaseModel)
+            and issubclass(origin, pydantic.BaseModel)
+        ):
+            raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
+
+        if (
+            cast_to is not object
+            and origin is not list
+            and origin is not dict
+            and origin is not Union
+            and not issubclass(origin, BaseModel)
+        ):
+            raise RuntimeError(
+                f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
+            )
+
+        # split is required to handle cases where additional information is included
+        # in the response, e.g. application/json; charset=utf-8
+        content_type, *_ = response.headers.get("content-type", "*").split(";")
+        if content_type != "application/json":
+            if is_basemodel(cast_to):
+                try:
+                    data = response.json()
+                except Exception as exc:
+                    log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
+                else:
+                    return self._client._process_response_data(
+                        data=data,
+                        cast_to=cast_to,  # type: ignore
+                        response=response,
+                    )
+
+            if self._client._strict_response_validation:
+                raise APIResponseValidationError(
+                    response=response,
+                    message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
+                    body=response.text,
+                )
+
+            # If the API responds with content that isn't JSON then we just return
+            # the (decoded) text without performing any parsing so that you can still
+            # handle the response however you need to.
+            return response.text  # type: ignore
+
+        data = response.json()
+
+        return self._client._process_response_data(
+            data=data,
+            cast_to=cast_to,  # type: ignore
+            response=response,
+        )
+
+    @override
+    def __repr__(self) -> str:
+        return f"<APIResponse [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
+
+
+class MissingStreamClassError(TypeError):
+    def __init__(self) -> None:
+        super().__init__(
+            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference",
+        )
+
+
+def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, LegacyAPIResponse[R]]:
+    """Higher order function that takes one of our bound API methods and wraps it
+    to support returning the raw `APIResponse` object directly.
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
+        extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "true"
+
+        kwargs["extra_headers"] = extra_headers
+
+        return cast(LegacyAPIResponse[R], func(*args, **kwargs))
+
+    return wrapped
+
+
+def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[LegacyAPIResponse[R]]]:
+    """Higher order function that takes one of our bound API methods and wraps it
+    to support returning the raw `APIResponse` object directly.
+    """
+
+    @functools.wraps(func)
+    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
+        extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "true"
+
+        kwargs["extra_headers"] = extra_headers
+
+        return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
+
+    return wrapped
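+
+
+# Editorial sketch (not part of the upstream module): these wrappers back the
+# `.with_raw_response` surface on each resource, e.g.
+#
+#     response = client.models.with_raw_response.retrieve("gpt-4o-mini")
+#     print(response.request_id, response.status_code)
+#     model = response.parse()  # the usual `Model` object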
+
+
+class HttpxBinaryResponseContent:
+    response: httpx.Response
+
+    def __init__(self, response: httpx.Response) -> None:
+        self.response = response
+
+    @property
+    def content(self) -> bytes:
+        return self.response.content
+
+    @property
+    def text(self) -> str:
+        return self.response.text
+
+    @property
+    def encoding(self) -> str | None:
+        return self.response.encoding
+
+    @property
+    def charset_encoding(self) -> str | None:
+        return self.response.charset_encoding
+
+    def json(self, **kwargs: Any) -> Any:
+        return self.response.json(**kwargs)
+
+    def read(self) -> bytes:
+        return self.response.read()
+
+    def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]:
+        return self.response.iter_bytes(chunk_size)
+
+    def iter_text(self, chunk_size: int | None = None) -> Iterator[str]:
+        return self.response.iter_text(chunk_size)
+
+    def iter_lines(self) -> Iterator[str]:
+        return self.response.iter_lines()
+
+    def iter_raw(self, chunk_size: int | None = None) -> Iterator[bytes]:
+        return self.response.iter_raw(chunk_size)
+
+    def write_to_file(
+        self,
+        file: str | os.PathLike[str],
+    ) -> None:
+        """Write the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+
+        Note: if you want to stream the data to the file instead of writing
+        all at once then you should use `.with_streaming_response` when making
+        the API request, e.g. `client.with_streaming_response.foo().stream_to_file('my_filename.txt')`
+        """
+        with open(file, mode="wb") as f:
+            for data in self.response.iter_bytes():
+                f.write(data)
+
+    @deprecated(
+        "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead"
+    )
+    def stream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
+        with open(file, mode="wb") as f:
+            for data in self.response.iter_bytes(chunk_size):
+                f.write(data)
+
+    def close(self) -> None:
+        return self.response.close()
+
+    async def aread(self) -> bytes:
+        return await self.response.aread()
+
+    async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
+        return self.response.aiter_bytes(chunk_size)
+
+    async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
+        return self.response.aiter_text(chunk_size)
+
+    async def aiter_lines(self) -> AsyncIterator[str]:
+        return self.response.aiter_lines()
+
+    async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
+        return self.response.aiter_raw(chunk_size)
+
+    @deprecated(
+        "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead"
+    )
+    async def astream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
+        path = anyio.Path(file)
+        async with await path.open(mode="wb") as f:
+            async for data in self.response.aiter_bytes(chunk_size):
+                await f.write(data)
+
+    async def aclose(self) -> None:
+        return await self.response.aclose()
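+
+
+# Editorial sketch (not part of the upstream module): binary endpoints hand back
+# this wrapper, e.g. assuming a text-to-speech request:
+#
+#     speech = client.audio.speech.create(model="tts-1", voice="alloy", input="hi")
+#     speech.write_to_file("hi.mp3")  # buffered write; see the streaming note above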
diff --git a/.venv/lib/python3.12/site-packages/openai/_models.py b/.venv/lib/python3.12/site-packages/openai/_models.py
new file mode 100644
index 00000000..ff7c1f33
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_models.py
@@ -0,0 +1,844 @@
+from __future__ import annotations
+
+import os
+import inspect
+from typing import TYPE_CHECKING, Any, Type, Tuple, Union, Generic, TypeVar, Callable, Optional, cast
+from datetime import date, datetime
+from typing_extensions import (
+    Unpack,
+    Literal,
+    ClassVar,
+    Protocol,
+    Required,
+    Sequence,
+    ParamSpec,
+    TypedDict,
+    TypeGuard,
+    final,
+    override,
+    runtime_checkable,
+)
+
+import pydantic
+import pydantic.generics
+from pydantic.fields import FieldInfo
+
+from ._types import (
+    Body,
+    IncEx,
+    Query,
+    ModelT,
+    Headers,
+    Timeout,
+    NotGiven,
+    AnyMapping,
+    HttpxRequestFiles,
+)
+from ._utils import (
+    PropertyInfo,
+    is_list,
+    is_given,
+    json_safe,
+    lru_cache,
+    is_mapping,
+    parse_date,
+    coerce_boolean,
+    parse_datetime,
+    strip_not_given,
+    extract_type_arg,
+    is_annotated_type,
+    is_type_alias_type,
+    strip_annotated_type,
+)
+from ._compat import (
+    PYDANTIC_V2,
+    ConfigDict,
+    GenericModel as BaseGenericModel,
+    get_args,
+    is_union,
+    parse_obj,
+    get_origin,
+    is_literal_type,
+    get_model_config,
+    get_model_fields,
+    field_get_default,
+)
+from ._constants import RAW_RESPONSE_HEADER
+
+if TYPE_CHECKING:
+    from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema
+
+__all__ = ["BaseModel", "GenericModel"]
+
+_T = TypeVar("_T")
+_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel")
+
+P = ParamSpec("P")
+
+ReprArgs = Sequence[Tuple[Optional[str], Any]]
+
+
+@runtime_checkable
+class _ConfigProtocol(Protocol):
+    allow_population_by_field_name: bool
+
+
+class BaseModel(pydantic.BaseModel):
+    if PYDANTIC_V2:
+        model_config: ClassVar[ConfigDict] = ConfigDict(
+            extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
+        )
+    else:
+
+        @property
+        @override
+        def model_fields_set(self) -> set[str]:
+            # a forwards-compat shim for pydantic v2
+            return self.__fields_set__  # type: ignore
+
+        class Config(pydantic.BaseConfig):  # pyright: ignore[reportDeprecated]
+            extra: Any = pydantic.Extra.allow  # type: ignore
+
+        @override
+        def __repr_args__(self) -> ReprArgs:
+            # we don't want these attributes to be included when something like `rich.print` is used
+            return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}]
+
+    if TYPE_CHECKING:
+        _request_id: Optional[str] = None
+        """The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.
+
+        This will **only** be set for the top-level response object, it will not be defined for nested objects. For example:
+        
+        ```py
+        completion = await client.chat.completions.create(...)
+        completion._request_id  # req_id_xxx
+        completion.usage._request_id  # raises `AttributeError`
+        ```
+
+        Note: unlike other properties that use an `_` prefix, this property
+        *is* public. Unless documented otherwise, all other `_` prefix properties,
+        methods and modules are *private*.
+        """
+
+    def to_dict(
+        self,
+        *,
+        mode: Literal["json", "python"] = "python",
+        use_api_names: bool = True,
+        exclude_unset: bool = True,
+        exclude_defaults: bool = False,
+        exclude_none: bool = False,
+        warnings: bool = True,
+    ) -> dict[str, object]:
+        """Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+
+        By default, fields that were not set by the API will not be included,
+        and keys will match the API response, *not* the property names from the model.
+
+        For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
+        the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).
+
+        Args:
+            mode:
+                If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`.
+                If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)`
+
+            use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
+            exclude_unset: Whether to exclude fields that have not been explicitly set.
+            exclude_defaults: Whether to exclude fields that are set to their default value from the output.
+            exclude_none: Whether to exclude fields that have a value of `None` from the output.
+            warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2.
+        """
+        return self.model_dump(
+            mode=mode,
+            by_alias=use_api_names,
+            exclude_unset=exclude_unset,
+            exclude_defaults=exclude_defaults,
+            exclude_none=exclude_none,
+            warnings=warnings,
+        )
+
+    def to_json(
+        self,
+        *,
+        indent: int | None = 2,
+        use_api_names: bool = True,
+        exclude_unset: bool = True,
+        exclude_defaults: bool = False,
+        exclude_none: bool = False,
+        warnings: bool = True,
+    ) -> str:
+        """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation).
+
+        By default, fields that were not set by the API will not be included,
+        and keys will match the API response, *not* the property names from the model.
+
+        For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
+        the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).
+
+        Args:
+            indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2`
+            use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
+            exclude_unset: Whether to exclude fields that have not been explicitly set.
+            exclude_defaults: Whether to exclude fields that have the default value.
+            exclude_none: Whether to exclude fields that have a value of `None`.
+            warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2.
+        """
+        return self.model_dump_json(
+            indent=indent,
+            by_alias=use_api_names,
+            exclude_unset=exclude_unset,
+            exclude_defaults=exclude_defaults,
+            exclude_none=exclude_none,
+            warnings=warnings,
+        )
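+
+    # Editorial sketch (not part of the upstream module): given a model constructed
+    # from an API payload of `{"fooBar": true}` with a `foo_bar: bool` field:
+    #
+    #     model.to_dict()                     # -> {"fooBar": True}
+    #     model.to_dict(use_api_names=False)  # -> {"foo_bar": True}
+    #     model.to_json()                     # -> '{\n  "fooBar": true\n}'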
+
+    @override
+    def __str__(self) -> str:
+        # mypy complains about an invalid self arg
+        return f"{self.__repr_name__()}({self.__repr_str__(', ')})"  # type: ignore[misc]
+
+    # Override the 'construct' method in a way that supports recursive parsing without validation.
+    # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836.
+    @classmethod
+    @override
+    def construct(  # pyright: ignore[reportIncompatibleMethodOverride]
+        __cls: Type[ModelT],
+        _fields_set: set[str] | None = None,
+        **values: object,
+    ) -> ModelT:
+        m = __cls.__new__(__cls)
+        fields_values: dict[str, object] = {}
+
+        config = get_model_config(__cls)
+        populate_by_name = (
+            config.allow_population_by_field_name
+            if isinstance(config, _ConfigProtocol)
+            else config.get("populate_by_name")
+        )
+
+        if _fields_set is None:
+            _fields_set = set()
+
+        model_fields = get_model_fields(__cls)
+        for name, field in model_fields.items():
+            key = field.alias
+            if key is None or (key not in values and populate_by_name):
+                key = name
+
+            if key in values:
+                fields_values[name] = _construct_field(value=values[key], field=field, key=key)
+                _fields_set.add(name)
+            else:
+                fields_values[name] = field_get_default(field)
+
+        _extra = {}
+        for key, value in values.items():
+            if key not in model_fields:
+                if PYDANTIC_V2:
+                    _extra[key] = value
+                else:
+                    _fields_set.add(key)
+                    fields_values[key] = value
+
+        object.__setattr__(m, "__dict__", fields_values)
+
+        if PYDANTIC_V2:
+            # these properties are copied from Pydantic's `model_construct()` method
+            object.__setattr__(m, "__pydantic_private__", None)
+            object.__setattr__(m, "__pydantic_extra__", _extra)
+            object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
+        else:
+            # init_private_attributes() does not exist in v2
+            m._init_private_attributes()  # type: ignore
+
+            # copied from Pydantic v1's `construct()` method
+            object.__setattr__(m, "__fields_set__", _fields_set)
+
+        return m
+
+    if not TYPE_CHECKING:
+        # type checkers incorrectly complain about this assignment
+        # because the type signatures are technically different
+        # although not in practice
+        model_construct = construct
+
+    if not PYDANTIC_V2:
+        # we define aliases for some of the new pydantic v2 methods so
+        # that we can just document these methods without having to specify
+        # a specific pydantic version as some users may not know which
+        # pydantic version they are currently using
+
+        @override
+        def model_dump(
+            self,
+            *,
+            mode: Literal["json", "python"] | str = "python",
+            include: IncEx | None = None,
+            exclude: IncEx | None = None,
+            by_alias: bool = False,
+            exclude_unset: bool = False,
+            exclude_defaults: bool = False,
+            exclude_none: bool = False,
+            round_trip: bool = False,
+            warnings: bool | Literal["none", "warn", "error"] = True,
+            context: dict[str, Any] | None = None,
+            serialize_as_any: bool = False,
+        ) -> dict[str, Any]:
+            """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
+
+            Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+
+            Args:
+                mode: The mode in which `to_python` should run.
+                    If mode is 'json', the dictionary will only contain JSON serializable types.
+                    If mode is 'python', the dictionary may contain any Python objects.
+                include: A list of fields to include in the output.
+                exclude: A list of fields to exclude from the output.
+                by_alias: Whether to use the field's alias in the dictionary key if defined.
+                exclude_unset: Whether to exclude fields that are unset or None from the output.
+                exclude_defaults: Whether to exclude fields that are set to their default value from the output.
+                exclude_none: Whether to exclude fields that have a value of `None` from the output.
+                round_trip: Whether to enable serialization and deserialization round-trip support.
+                warnings: Whether to log warnings when invalid fields are encountered.
+
+            Returns:
+                A dictionary representation of the model.
+            """
+            if mode not in {"json", "python"}:
+                raise ValueError("mode must be either 'json' or 'python'")
+            if round_trip != False:
+                raise ValueError("round_trip is only supported in Pydantic v2")
+            if warnings != True:
+                raise ValueError("warnings is only supported in Pydantic v2")
+            if context is not None:
+                raise ValueError("context is only supported in Pydantic v2")
+            if serialize_as_any != False:
+                raise ValueError("serialize_as_any is only supported in Pydantic v2")
+            dumped = super().dict(  # pyright: ignore[reportDeprecated]
+                include=include,
+                exclude=exclude,
+                by_alias=by_alias,
+                exclude_unset=exclude_unset,
+                exclude_defaults=exclude_defaults,
+                exclude_none=exclude_none,
+            )
+
+            return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped
+
+        @override
+        def model_dump_json(
+            self,
+            *,
+            indent: int | None = None,
+            include: IncEx | None = None,
+            exclude: IncEx | None = None,
+            by_alias: bool = False,
+            exclude_unset: bool = False,
+            exclude_defaults: bool = False,
+            exclude_none: bool = False,
+            round_trip: bool = False,
+            warnings: bool | Literal["none", "warn", "error"] = True,
+            context: dict[str, Any] | None = None,
+            serialize_as_any: bool = False,
+        ) -> str:
+            """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json
+
+            Generates a JSON representation of the model using Pydantic's `to_json` method.
+
+            Args:
+                indent: Indentation to use in the JSON output. If None is passed, the output will be compact.
+                include: Field(s) to include in the JSON output. Can take either a string or set of strings.
+                exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings.
+                by_alias: Whether to serialize using field aliases.
+                exclude_unset: Whether to exclude fields that have not been explicitly set.
+                exclude_defaults: Whether to exclude fields that have the default value.
+                exclude_none: Whether to exclude fields that have a value of `None`.
+                round_trip: Whether to use serialization/deserialization between JSON and class instance.
+                warnings: Whether to show any warnings that occurred during serialization.
+
+            Returns:
+                A JSON string representation of the model.
+            """
+            if round_trip != False:
+                raise ValueError("round_trip is only supported in Pydantic v2")
+            if warnings != True:
+                raise ValueError("warnings is only supported in Pydantic v2")
+            if context is not None:
+                raise ValueError("context is only supported in Pydantic v2")
+            if serialize_as_any != False:
+                raise ValueError("serialize_as_any is only supported in Pydantic v2")
+            return super().json(  # type: ignore[reportDeprecated]
+                indent=indent,
+                include=include,
+                exclude=exclude,
+                by_alias=by_alias,
+                exclude_unset=exclude_unset,
+                exclude_defaults=exclude_defaults,
+                exclude_none=exclude_none,
+            )
+
+
+def _construct_field(value: object, field: FieldInfo, key: str) -> object:
+    if value is None:
+        return field_get_default(field)
+
+    if PYDANTIC_V2:
+        type_ = field.annotation
+    else:
+        type_ = cast(type, field.outer_type_)  # type: ignore
+
+    if type_ is None:
+        raise RuntimeError(f"Unexpected field type is None for {key}")
+
+    return construct_type(value=value, type_=type_)
+
+
+def is_basemodel(type_: type) -> bool:
+    """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`"""
+    if is_union(type_):
+        for variant in get_args(type_):
+            if is_basemodel(variant):
+                return True
+
+        return False
+
+    return is_basemodel_type(type_)
+
+
+def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]:
+    origin = get_origin(type_) or type_
+    if not inspect.isclass(origin):
+        return False
+    return issubclass(origin, BaseModel) or issubclass(origin, GenericModel)
+
+
+def build(
+    base_model_cls: Callable[P, _BaseModelT],
+    *args: P.args,
+    **kwargs: P.kwargs,
+) -> _BaseModelT:
+    """Construct a BaseModel class without validation.
+
+    This is useful for cases where you need to instantiate a `BaseModel`
+    from an API response as this provides type-safe params which isn't supported
+    by helpers like `construct_type()`.
+
+    ```py
+    build(MyModel, my_field_a="foo", my_field_b=123)
+    ```
+    """
+    if args:
+        raise TypeError(
+            "Received positional arguments which are not supported; Keyword arguments must be used instead",
+        )
+
+    return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs))
+
+
+def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
+    """Loose coercion to the expected type with construction of nested values.
+
+    Note: the returned value from this function is not guaranteed to match the
+    given type.
+    """
+    return cast(_T, construct_type(value=value, type_=type_))
+
+
+def construct_type(*, value: object, type_: object) -> object:
+    """Loose coercion to the expected type with construction of nested values.
+
+    If the given value does not match the expected type then it is returned as-is.
+    """
+
+    # store a reference to the original type we were given before we extract any inner
+    # types so that we can properly resolve forward references in `TypeAliasType` annotations
+    original_type = None
+
+    # we allow `object` as the input type because otherwise, passing things like
+    # `Literal['value']` will be reported as a type error by type checkers
+    type_ = cast("type[object]", type_)
+    if is_type_alias_type(type_):
+        original_type = type_  # type: ignore[unreachable]
+        type_ = type_.__value__  # type: ignore[unreachable]
+
+    # unwrap `Annotated[T, ...]` -> `T`
+    if is_annotated_type(type_):
+        meta: tuple[Any, ...] = get_args(type_)[1:]
+        type_ = extract_type_arg(type_, 0)
+    else:
+        meta = tuple()
+
+    # we need to use the origin class for any types that are subscripted generics
+    # e.g. Dict[str, object]
+    origin = get_origin(type_) or type_
+    args = get_args(type_)
+
+    if is_union(origin):
+        try:
+            return validate_type(type_=cast("type[object]", original_type or type_), value=value)
+        except Exception:
+            pass
+
+        # if the type is a discriminated union then we want to construct the right variant
+        # in the union, even if the data doesn't match exactly, otherwise we'd break code
+        # that relies on the constructed class types, e.g.
+        #
+        # class FooType:
+        #   kind: Literal['foo']
+        #   value: str
+        #
+        # class BarType:
+        #   kind: Literal['bar']
+        #   value: int
+        #
+        # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then
+        # we'd end up constructing `FooType` when it should be `BarType`.
+        discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta)
+        if discriminator and is_mapping(value):
+            variant_value = value.get(discriminator.field_alias_from or discriminator.field_name)
+            if variant_value and isinstance(variant_value, str):
+                variant_type = discriminator.mapping.get(variant_value)
+                if variant_type:
+                    return construct_type(type_=variant_type, value=value)
+
+        # if the data is not valid, use the first variant that doesn't fail while deserializing
+        for variant in args:
+            try:
+                return construct_type(value=value, type_=variant)
+            except Exception:
+                continue
+
+        raise RuntimeError(f"Could not convert data into a valid instance of {type_}")
+
+    if origin == dict:
+        if not is_mapping(value):
+            return value
+
+        _, items_type = get_args(type_)  # Dict[_, items_type]
+        return {key: construct_type(value=item, type_=items_type) for key, item in value.items()}
+
+    if (
+        not is_literal_type(type_)
+        and inspect.isclass(origin)
+        and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel))
+    ):
+        if is_list(value):
+            return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value]
+
+        if is_mapping(value):
+            if issubclass(type_, BaseModel):
+                return type_.construct(**value)  # type: ignore[arg-type]
+
+            return cast(Any, type_).construct(**value)
+
+    if origin == list:
+        if not is_list(value):
+            return value
+
+        inner_type = args[0]  # List[inner_type]
+        return [construct_type(value=entry, type_=inner_type) for entry in value]
+
+    if origin == float:
+        if isinstance(value, int):
+            coerced = float(value)
+            if coerced != value:
+                return value
+            return coerced
+
+        return value
+
+    if type_ == datetime:
+        try:
+            return parse_datetime(value)  # type: ignore
+        except Exception:
+            return value
+
+    if type_ == date:
+        try:
+            return parse_date(value)  # type: ignore
+        except Exception:
+            return value
+
+    return value
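+
+# Editorial sketch (not part of the upstream module): coercion is deliberately
+# loose; values that don't match the expected type are returned unchanged rather
+# than raising (unions with no matching variant are the exception):
+#
+#     construct_type(type_=datetime, value="2024-03-22T18:11:19Z")  # -> datetime(...)
+#     construct_type(type_=int, value="not-a-number")               # -> "not-a-number"
+#     construct_type(type_=list[int], value="oops")                 # -> "oops"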
+
+
+@runtime_checkable
+class CachedDiscriminatorType(Protocol):
+    __discriminator__: DiscriminatorDetails
+
+
+class DiscriminatorDetails:
+    field_name: str
+    """The name of the discriminator field in the variant class, e.g.
+
+    ```py
+    class Foo(BaseModel):
+        type: Literal['foo']
+    ```
+
+    Will result in field_name='type'
+    """
+
+    field_alias_from: str | None
+    """The name of the discriminator field in the API response, e.g.
+
+    ```py
+    class Foo(BaseModel):
+        type: Literal['foo'] = Field(alias='type_from_api')
+    ```
+
+    Will result in field_alias_from='type_from_api'
+    """
+
+    mapping: dict[str, type]
+    """Mapping of discriminator value to variant type, e.g.
+
+    {'foo': FooVariant, 'bar': BarVariant}
+    """
+
+    def __init__(
+        self,
+        *,
+        mapping: dict[str, type],
+        discriminator_field: str,
+        discriminator_alias: str | None,
+    ) -> None:
+        self.mapping = mapping
+        self.field_name = discriminator_field
+        self.field_alias_from = discriminator_alias
+
+
+def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
+    if isinstance(union, CachedDiscriminatorType):
+        return union.__discriminator__
+
+    discriminator_field_name: str | None = None
+
+    for annotation in meta_annotations:
+        if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None:
+            discriminator_field_name = annotation.discriminator
+            break
+
+    if not discriminator_field_name:
+        return None
+
+    mapping: dict[str, type] = {}
+    discriminator_alias: str | None = None
+
+    for variant in get_args(union):
+        variant = strip_annotated_type(variant)
+        if is_basemodel_type(variant):
+            if PYDANTIC_V2:
+                field = _extract_field_schema_pv2(variant, discriminator_field_name)
+                if not field:
+                    continue
+
+                # Note: if one variant defines an alias then they all should
+                discriminator_alias = field.get("serialization_alias")
+
+                field_schema = field["schema"]
+
+                if field_schema["type"] == "literal":
+                    for entry in cast("LiteralSchema", field_schema)["expected"]:
+                        if isinstance(entry, str):
+                            mapping[entry] = variant
+            else:
+                field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name)  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+                if not field_info:
+                    continue
+
+                # Note: if one variant defines an alias then they all should
+                discriminator_alias = field_info.alias
+
+                if field_info.annotation and is_literal_type(field_info.annotation):
+                    for entry in get_args(field_info.annotation):
+                        if isinstance(entry, str):
+                            mapping[entry] = variant
+
+    if not mapping:
+        return None
+
+    details = DiscriminatorDetails(
+        mapping=mapping,
+        discriminator_field=discriminator_field_name,
+        discriminator_alias=discriminator_alias,
+    )
+    cast(CachedDiscriminatorType, union).__discriminator__ = details
+    return details
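+
+# Editorial sketch (not part of the upstream module): the annotation shape this
+# helper looks for, using hypothetical FooType/BarType variants that each declare
+# a `type: Literal[...]` field (as in the comment inside `construct_type`):
+#
+#     Item = Annotated[Union[FooType, BarType], PropertyInfo(discriminator="type")]
+#     construct_type(type_=Item, value={"type": "bar", "value": 1})  # -> BarType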
+
+
+def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None:
+    schema = model.__pydantic_core_schema__
+    if schema["type"] == "definitions":
+        schema = schema["schema"]
+
+    if schema["type"] != "model":
+        return None
+
+    schema = cast("ModelSchema", schema)
+    fields_schema = schema["schema"]
+    if fields_schema["type"] != "model-fields":
+        return None
+
+    fields_schema = cast("ModelFieldsSchema", fields_schema)
+    field = fields_schema["fields"].get(field_name)
+    if not field:
+        return None
+
+    return cast("ModelField", field)  # pyright: ignore[reportUnnecessaryCast]
+
+
+def validate_type(*, type_: type[_T], value: object) -> _T:
+    """Strict validation that the given value matches the expected type"""
+    if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel):
+        return cast(_T, parse_obj(type_, value))
+
+    return cast(_T, _validate_non_model_type(type_=type_, value=value))
+
+
+def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
+    """Add a pydantic config for the given type.
+
+    Note: this is a no-op on Pydantic v1.
+    """
+    setattr(typ, "__pydantic_config__", config)  # noqa: B010
+
+
+def add_request_id(obj: BaseModel, request_id: str | None) -> None:
+    obj._request_id = request_id
+
+    # in Pydantic v1, using setattr like we do above causes the attribute
+    # to be included when serializing the model which we don't want in this
+    # case so we need to explicitly exclude it
+    if not PYDANTIC_V2:
+        try:
+            exclude_fields = obj.__exclude_fields__  # type: ignore
+        except AttributeError:
+            cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"}
+        else:
+            cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"}
+
+
+# our use of subclassing here causes weirdness for type checkers,
+# so we just pretend that we don't subclass
+if TYPE_CHECKING:
+    GenericModel = BaseModel
+else:
+
+    class GenericModel(BaseGenericModel, BaseModel):
+        pass
+
+
+if PYDANTIC_V2:
+    from pydantic import TypeAdapter as _TypeAdapter
+
+    _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))
+
+    if TYPE_CHECKING:
+        from pydantic import TypeAdapter
+    else:
+        TypeAdapter = _CachedTypeAdapter
+
+    def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
+        return TypeAdapter(type_).validate_python(value)
+
+elif not TYPE_CHECKING:  # TODO: condition is weird
+
+    class RootModel(GenericModel, Generic[_T]):
+        """Used as a placeholder to easily convert runtime types to a Pydantic format
+        to provide validation.
+
+        For example:
+        ```py
+        validated = RootModel[int](__root__="5").__root__
+        # validated: 5
+        ```
+        """
+
+        __root__: _T
+
+    def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
+        model = _create_pydantic_model(type_).validate(value)
+        return cast(_T, model.__root__)
+
+    def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]:
+        return RootModel[type_]  # type: ignore
+
+
+class FinalRequestOptionsInput(TypedDict, total=False):
+    method: Required[str]
+    url: Required[str]
+    params: Query
+    headers: Headers
+    max_retries: int
+    timeout: float | Timeout | None
+    files: HttpxRequestFiles | None
+    idempotency_key: str
+    json_data: Body
+    extra_json: AnyMapping
+
+
+@final
+class FinalRequestOptions(pydantic.BaseModel):
+    method: str
+    url: str
+    params: Query = {}
+    headers: Union[Headers, NotGiven] = NotGiven()
+    max_retries: Union[int, NotGiven] = NotGiven()
+    timeout: Union[float, Timeout, None, NotGiven] = NotGiven()
+    files: Union[HttpxRequestFiles, None] = None
+    idempotency_key: Union[str, None] = None
+    post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
+
+    # It should be noted that we cannot use `json` here as that would override
+    # a BaseModel method in an incompatible fashion.
+    json_data: Union[Body, None] = None
+    extra_json: Union[AnyMapping, None] = None
+
+    if PYDANTIC_V2:
+        model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
+    else:
+
+        class Config(pydantic.BaseConfig):  # pyright: ignore[reportDeprecated]
+            arbitrary_types_allowed: bool = True
+
+    def get_max_retries(self, max_retries: int) -> int:
+        if isinstance(self.max_retries, NotGiven):
+            return max_retries
+        return self.max_retries
+
+    def _strip_raw_response_header(self) -> None:
+        if not is_given(self.headers):
+            return
+
+        if self.headers.get(RAW_RESPONSE_HEADER):
+            self.headers = {**self.headers}
+            self.headers.pop(RAW_RESPONSE_HEADER)
+
+    # override the `construct` method so that we can run custom transformations.
+    # this is necessary as we don't want to do any actual runtime type checking
+    # (which means we can't use validators) but we do want to ensure that `NotGiven`
+    # values are not present
+    #
+    # type ignore required because we're adding explicit types to `**values`
+    @classmethod
+    def construct(  # type: ignore
+        cls,
+        _fields_set: set[str] | None = None,
+        **values: Unpack[FinalRequestOptionsInput],
+    ) -> FinalRequestOptions:
+        kwargs: dict[str, Any] = {
+            # we unconditionally call `strip_not_given` on any value
+            # as it will just ignore any non-mapping types
+            key: strip_not_given(value)
+            for key, value in values.items()
+        }
+        if PYDANTIC_V2:
+            return super().model_construct(_fields_set, **kwargs)
+        return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs))  # pyright: ignore[reportDeprecated]
+
+    if not TYPE_CHECKING:
+        # type checkers incorrectly complain about this assignment
+        model_construct = construct
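
A minimal usage sketch of the `construct` override above, assuming the `FinalRequestOptions` and `NOT_GIVEN` definitions from this module are importable as shown; the endpoint and payload are purely illustrative:

```py
from openai._models import FinalRequestOptions
from openai._types import NOT_GIVEN

# `construct` runs `strip_not_given` over each value, so NOT_GIVEN entries
# inside mappings such as `json_data` are dropped instead of being serialised.
options = FinalRequestOptions.construct(
    method="post",
    url="/chat/completions",
    json_data={"model": "gpt-4o-mini", "user": NOT_GIVEN},
)
print(options.json_data)  # {'model': 'gpt-4o-mini'}
print(options.get_max_retries(2))  # falls back to 2 because max_retries was not given
```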
diff --git a/.venv/lib/python3.12/site-packages/openai/_module_client.py b/.venv/lib/python3.12/site-packages/openai/_module_client.py
new file mode 100644
index 00000000..e7d26578
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_module_client.py
@@ -0,0 +1,106 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import override
+
+from . import resources, _load_client
+from ._utils import LazyProxy
+
+
+class ChatProxy(LazyProxy[resources.Chat]):
+    @override
+    def __load__(self) -> resources.Chat:
+        return _load_client().chat
+
+
+class BetaProxy(LazyProxy[resources.Beta]):
+    @override
+    def __load__(self) -> resources.Beta:
+        return _load_client().beta
+
+
+class FilesProxy(LazyProxy[resources.Files]):
+    @override
+    def __load__(self) -> resources.Files:
+        return _load_client().files
+
+
+class AudioProxy(LazyProxy[resources.Audio]):
+    @override
+    def __load__(self) -> resources.Audio:
+        return _load_client().audio
+
+
+class ImagesProxy(LazyProxy[resources.Images]):
+    @override
+    def __load__(self) -> resources.Images:
+        return _load_client().images
+
+
+class ModelsProxy(LazyProxy[resources.Models]):
+    @override
+    def __load__(self) -> resources.Models:
+        return _load_client().models
+
+
+class BatchesProxy(LazyProxy[resources.Batches]):
+    @override
+    def __load__(self) -> resources.Batches:
+        return _load_client().batches
+
+
+class UploadsProxy(LazyProxy[resources.Uploads]):
+    @override
+    def __load__(self) -> resources.Uploads:
+        return _load_client().uploads
+
+
+class ResponsesProxy(LazyProxy[resources.Responses]):
+    @override
+    def __load__(self) -> resources.Responses:
+        return _load_client().responses
+
+
+class EmbeddingsProxy(LazyProxy[resources.Embeddings]):
+    @override
+    def __load__(self) -> resources.Embeddings:
+        return _load_client().embeddings
+
+
+class CompletionsProxy(LazyProxy[resources.Completions]):
+    @override
+    def __load__(self) -> resources.Completions:
+        return _load_client().completions
+
+
+class ModerationsProxy(LazyProxy[resources.Moderations]):
+    @override
+    def __load__(self) -> resources.Moderations:
+        return _load_client().moderations
+
+
+class FineTuningProxy(LazyProxy[resources.FineTuning]):
+    @override
+    def __load__(self) -> resources.FineTuning:
+        return _load_client().fine_tuning
+
+
+class VectorStoresProxy(LazyProxy[resources.VectorStores]):
+    @override
+    def __load__(self) -> resources.VectorStores:
+        return _load_client().vector_stores
+
+
+chat: resources.Chat = ChatProxy().__as_proxied__()
+beta: resources.Beta = BetaProxy().__as_proxied__()
+files: resources.Files = FilesProxy().__as_proxied__()
+audio: resources.Audio = AudioProxy().__as_proxied__()
+images: resources.Images = ImagesProxy().__as_proxied__()
+models: resources.Models = ModelsProxy().__as_proxied__()
+batches: resources.Batches = BatchesProxy().__as_proxied__()
+uploads: resources.Uploads = UploadsProxy().__as_proxied__()
+responses: resources.Responses = ResponsesProxy().__as_proxied__()
+embeddings: resources.Embeddings = EmbeddingsProxy().__as_proxied__()
+completions: resources.Completions = CompletionsProxy().__as_proxied__()
+moderations: resources.Moderations = ModerationsProxy().__as_proxied__()
+fine_tuning: resources.FineTuning = FineTuningProxy().__as_proxied__()
+vector_stores: resources.VectorStores = VectorStoresProxy().__as_proxied__()
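
For orientation, a hedged sketch of how these module-level proxies are used; it assumes `OPENAI_API_KEY` is set so that `_load_client()` can construct the default client on first access:

```py
import openai

# Touching `openai.models` triggers ModelsProxy.__load__, which builds (or
# reuses) the lazily-constructed default client and forwards attribute access.
for model in openai.models.list():
    print(model.id)
```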
diff --git a/.venv/lib/python3.12/site-packages/openai/_qs.py b/.venv/lib/python3.12/site-packages/openai/_qs.py
new file mode 100644
index 00000000..274320ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_qs.py
@@ -0,0 +1,150 @@
+from __future__ import annotations
+
+from typing import Any, List, Tuple, Union, Mapping, TypeVar
+from urllib.parse import parse_qs, urlencode
+from typing_extensions import Literal, get_args
+
+from ._types import NOT_GIVEN, NotGiven, NotGivenOr
+from ._utils import flatten
+
+_T = TypeVar("_T")
+
+
+ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
+NestedFormat = Literal["dots", "brackets"]
+
+PrimitiveData = Union[str, int, float, bool, None]
+# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
+# https://github.com/microsoft/pyright/issues/3555
+Data = Union[PrimitiveData, List[Any], Tuple[Any], "Mapping[str, Any]"]
+Params = Mapping[str, Data]
+
+
+class Querystring:
+    array_format: ArrayFormat
+    nested_format: NestedFormat
+
+    def __init__(
+        self,
+        *,
+        array_format: ArrayFormat = "repeat",
+        nested_format: NestedFormat = "brackets",
+    ) -> None:
+        self.array_format = array_format
+        self.nested_format = nested_format
+
+    def parse(self, query: str) -> Mapping[str, object]:
+        # Note: custom format syntax is not supported yet
+        return parse_qs(query)
+
+    def stringify(
+        self,
+        params: Params,
+        *,
+        array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
+        nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+    ) -> str:
+        return urlencode(
+            self.stringify_items(
+                params,
+                array_format=array_format,
+                nested_format=nested_format,
+            )
+        )
+
+    def stringify_items(
+        self,
+        params: Params,
+        *,
+        array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
+        nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+    ) -> list[tuple[str, str]]:
+        opts = Options(
+            qs=self,
+            array_format=array_format,
+            nested_format=nested_format,
+        )
+        return flatten([self._stringify_item(key, value, opts) for key, value in params.items()])
+
+    def _stringify_item(
+        self,
+        key: str,
+        value: Data,
+        opts: Options,
+    ) -> list[tuple[str, str]]:
+        if isinstance(value, Mapping):
+            items: list[tuple[str, str]] = []
+            nested_format = opts.nested_format
+            for subkey, subvalue in value.items():
+                items.extend(
+                    self._stringify_item(
+                        # TODO: error if unknown format
+                        f"{key}.{subkey}" if nested_format == "dots" else f"{key}[{subkey}]",
+                        subvalue,
+                        opts,
+                    )
+                )
+            return items
+
+        if isinstance(value, (list, tuple)):
+            array_format = opts.array_format
+            if array_format == "comma":
+                return [
+                    (
+                        key,
+                        ",".join(self._primitive_value_to_str(item) for item in value if item is not None),
+                    ),
+                ]
+            elif array_format == "repeat":
+                items = []
+                for item in value:
+                    items.extend(self._stringify_item(key, item, opts))
+                return items
+            elif array_format == "indices":
+                raise NotImplementedError("The array indices format is not supported yet")
+            elif array_format == "brackets":
+                items = []
+                key = key + "[]"
+                for item in value:
+                    items.extend(self._stringify_item(key, item, opts))
+                return items
+            else:
+                raise NotImplementedError(
+                    f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
+                )
+
+        serialised = self._primitive_value_to_str(value)
+        if not serialised:
+            return []
+        return [(key, serialised)]
+
+    def _primitive_value_to_str(self, value: PrimitiveData) -> str:
+        # copied from httpx
+        if value is True:
+            return "true"
+        elif value is False:
+            return "false"
+        elif value is None:
+            return ""
+        return str(value)
+
+
+_qs = Querystring()
+parse = _qs.parse
+stringify = _qs.stringify
+stringify_items = _qs.stringify_items
+
+
+class Options:
+    array_format: ArrayFormat
+    nested_format: NestedFormat
+
+    def __init__(
+        self,
+        qs: Querystring = _qs,
+        *,
+        array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
+        nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+    ) -> None:
+        self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format
+        self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format
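
A small sketch of how `array_format` and `nested_format` shape the encoded query string; the parameter names here are made up for illustration:

```py
from openai._qs import Querystring

qs = Querystring()  # defaults: array_format="repeat", nested_format="brackets"
print(qs.stringify({"ids": [1, 2], "filter": {"status": "done"}}))
# ids=1&ids=2&filter%5Bstatus%5D=done

print(Querystring(array_format="comma").stringify({"ids": [1, 2]}))
# ids=1%2C2

print(Querystring(nested_format="dots").stringify({"filter": {"status": "done"}}))
# filter.status=done
```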
diff --git a/.venv/lib/python3.12/site-packages/openai/_resource.py b/.venv/lib/python3.12/site-packages/openai/_resource.py
new file mode 100644
index 00000000..fff9ba19
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_resource.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import time
+from typing import TYPE_CHECKING
+
+import anyio
+
+if TYPE_CHECKING:
+    from ._client import OpenAI, AsyncOpenAI
+
+
+class SyncAPIResource:
+    _client: OpenAI
+
+    def __init__(self, client: OpenAI) -> None:
+        self._client = client
+        self._get = client.get
+        self._post = client.post
+        self._patch = client.patch
+        self._put = client.put
+        self._delete = client.delete
+        self._get_api_list = client.get_api_list
+
+    def _sleep(self, seconds: float) -> None:
+        time.sleep(seconds)
+
+
+class AsyncAPIResource:
+    _client: AsyncOpenAI
+
+    def __init__(self, client: AsyncOpenAI) -> None:
+        self._client = client
+        self._get = client.get
+        self._post = client.post
+        self._patch = client.patch
+        self._put = client.put
+        self._delete = client.delete
+        self._get_api_list = client.get_api_list
+
+    async def _sleep(self, seconds: float) -> None:
+        await anyio.sleep(seconds)
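
The generated resource classes build on these two bases. A hypothetical (non-SDK) resource, just to show what the bound helpers look like; it assumes `OPENAI_API_KEY` is set so `OpenAI()` can be constructed, and the endpoint is made up:

```py
from openai import OpenAI
from openai._resource import SyncAPIResource


class PingResource(SyncAPIResource):  # hypothetical resource, not part of the SDK
    def ping(self) -> object:
        # `self._get` was bound to `client.get` in SyncAPIResource.__init__
        return self._get("/ping", cast_to=object)  # hypothetical endpoint


resource = PingResource(OpenAI())
```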
diff --git a/.venv/lib/python3.12/site-packages/openai/_response.py b/.venv/lib/python3.12/site-packages/openai/_response.py
new file mode 100644
index 00000000..95e94e65
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_response.py
@@ -0,0 +1,848 @@
+from __future__ import annotations
+
+import os
+import inspect
+import logging
+import datetime
+import functools
+from types import TracebackType
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Union,
+    Generic,
+    TypeVar,
+    Callable,
+    Iterator,
+    AsyncIterator,
+    cast,
+    overload,
+)
+from typing_extensions import Awaitable, ParamSpec, override, get_origin
+
+import anyio
+import httpx
+import pydantic
+
+from ._types import NoneType
+from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type, extract_type_var_from_base
+from ._models import BaseModel, is_basemodel, add_request_id
+from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
+from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
+from ._exceptions import OpenAIError, APIResponseValidationError
+
+if TYPE_CHECKING:
+    from ._models import FinalRequestOptions
+    from ._base_client import BaseClient
+
+
+P = ParamSpec("P")
+R = TypeVar("R")
+_T = TypeVar("_T")
+_APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]")
+_AsyncAPIResponseT = TypeVar("_AsyncAPIResponseT", bound="AsyncAPIResponse[Any]")
+
+log: logging.Logger = logging.getLogger(__name__)
+
+
+class BaseAPIResponse(Generic[R]):
+    _cast_to: type[R]
+    _client: BaseClient[Any, Any]
+    _parsed_by_type: dict[type[Any], Any]
+    _is_sse_stream: bool
+    _stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
+    _options: FinalRequestOptions
+
+    http_response: httpx.Response
+
+    retries_taken: int
+    """The number of retries made. If no retries happened this will be `0`"""
+
+    def __init__(
+        self,
+        *,
+        raw: httpx.Response,
+        cast_to: type[R],
+        client: BaseClient[Any, Any],
+        stream: bool,
+        stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
+        options: FinalRequestOptions,
+        retries_taken: int = 0,
+    ) -> None:
+        self._cast_to = cast_to
+        self._client = client
+        self._parsed_by_type = {}
+        self._is_sse_stream = stream
+        self._stream_cls = stream_cls
+        self._options = options
+        self.http_response = raw
+        self.retries_taken = retries_taken
+
+    @property
+    def headers(self) -> httpx.Headers:
+        return self.http_response.headers
+
+    @property
+    def http_request(self) -> httpx.Request:
+        """Returns the httpx Request instance associated with the current response."""
+        return self.http_response.request
+
+    @property
+    def status_code(self) -> int:
+        return self.http_response.status_code
+
+    @property
+    def url(self) -> httpx.URL:
+        """Returns the URL for which the request was made."""
+        return self.http_response.url
+
+    @property
+    def method(self) -> str:
+        return self.http_request.method
+
+    @property
+    def http_version(self) -> str:
+        return self.http_response.http_version
+
+    @property
+    def elapsed(self) -> datetime.timedelta:
+        """The time taken for the complete request/response cycle to complete."""
+        return self.http_response.elapsed
+
+    @property
+    def is_closed(self) -> bool:
+        """Whether or not the response body has been closed.
+
+        If this is False then there is response data that has not been read yet.
+        You must either fully consume the response body or call `.close()`
+        before discarding the response to prevent resource leaks.
+        """
+        return self.http_response.is_closed
+
+    @override
+    def __repr__(self) -> str:
+        return (
+            f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
+        )
+
+    def _parse(self, *, to: type[_T] | None = None) -> R | _T:
+        cast_to = to if to is not None else self._cast_to
+
+        # unwrap `TypeAlias('Name', T)` -> `T`
+        if is_type_alias_type(cast_to):
+            cast_to = cast_to.__value__  # type: ignore[unreachable]
+
+        # unwrap `Annotated[T, ...]` -> `T`
+        if cast_to and is_annotated_type(cast_to):
+            cast_to = extract_type_arg(cast_to, 0)
+
+        origin = get_origin(cast_to) or cast_to
+
+        if self._is_sse_stream:
+            if to:
+                if not is_stream_class_type(to):
+                    raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")
+
+                return cast(
+                    _T,
+                    to(
+                        cast_to=extract_stream_chunk_type(
+                            to,
+                            failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
+                        ),
+                        response=self.http_response,
+                        client=cast(Any, self._client),
+                    ),
+                )
+
+            if self._stream_cls:
+                return cast(
+                    R,
+                    self._stream_cls(
+                        cast_to=extract_stream_chunk_type(self._stream_cls),
+                        response=self.http_response,
+                        client=cast(Any, self._client),
+                    ),
+                )
+
+            stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
+            if stream_cls is None:
+                raise MissingStreamClassError()
+
+            return cast(
+                R,
+                stream_cls(
+                    cast_to=cast_to,
+                    response=self.http_response,
+                    client=cast(Any, self._client),
+                ),
+            )
+
+        if cast_to is NoneType:
+            return cast(R, None)
+
+        response = self.http_response
+        if cast_to == str:
+            return cast(R, response.text)
+
+        if cast_to == bytes:
+            return cast(R, response.content)
+
+        if cast_to == int:
+            return cast(R, int(response.text))
+
+        if cast_to == float:
+            return cast(R, float(response.text))
+
+        if cast_to == bool:
+            return cast(R, response.text.lower() == "true")
+
+        # handle the legacy binary response case
+        if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent":
+            return cast(R, cast_to(response))  # type: ignore
+
+        if origin == APIResponse:
+            raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
+
+        if inspect.isclass(origin) and issubclass(origin, httpx.Response):
+            # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
+            # and pass that class to our request functions. We cannot change the variance to be either
+            # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
+            # the response class ourselves but that is something that should be supported directly in httpx
+            # as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
+            if cast_to != httpx.Response:
+                raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_to`")
+            return cast(R, response)
+
+        if (
+            inspect.isclass(
+                origin  # pyright: ignore[reportUnknownArgumentType]
+            )
+            and not issubclass(origin, BaseModel)
+            and issubclass(origin, pydantic.BaseModel)
+        ):
+            raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
+
+        if (
+            cast_to is not object
+            and not origin is list
+            and not origin is dict
+            and not origin is Union
+            and not issubclass(origin, BaseModel)
+        ):
+            raise RuntimeError(
+                f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
+            )
+
+        # split is required to handle cases where additional information is included
+        # in the response, e.g. application/json; charset=utf-8
+        content_type, *_ = response.headers.get("content-type", "*").split(";")
+        if content_type != "application/json":
+            if is_basemodel(cast_to):
+                try:
+                    data = response.json()
+                except Exception as exc:
+                    log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
+                else:
+                    return self._client._process_response_data(
+                        data=data,
+                        cast_to=cast_to,  # type: ignore
+                        response=response,
+                    )
+
+            if self._client._strict_response_validation:
+                raise APIResponseValidationError(
+                    response=response,
+                    message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
+                    body=response.text,
+                )
+
+            # If the API responds with content that isn't JSON then we just return
+            # the (decoded) text without performing any parsing so that you can still
+            # handle the response however you need to.
+            return response.text  # type: ignore
+
+        data = response.json()
+
+        return self._client._process_response_data(
+            data=data,
+            cast_to=cast_to,  # type: ignore
+            response=response,
+        )
+
+
+class APIResponse(BaseAPIResponse[R]):
+    @property
+    def request_id(self) -> str | None:
+        return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
+
+    @overload
+    def parse(self, *, to: type[_T]) -> _T: ...
+
+    @overload
+    def parse(self) -> R: ...
+
+    def parse(self, *, to: type[_T] | None = None) -> R | _T:
+        """Returns the rich python representation of this response's data.
+
+        For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
+
+        You can customise the type that the response is parsed into through
+        the `to` argument, e.g.
+
+        ```py
+        from openai import BaseModel
+
+
+        class MyModel(BaseModel):
+            foo: str
+
+
+        obj = response.parse(to=MyModel)
+        print(obj.foo)
+        ```
+
+        We support parsing:
+          - `BaseModel`
+          - `dict`
+          - `list`
+          - `Union`
+          - `str`
+          - `int`
+          - `float`
+          - `httpx.Response`
+        """
+        cache_key = to if to is not None else self._cast_to
+        cached = self._parsed_by_type.get(cache_key)
+        if cached is not None:
+            return cached  # type: ignore[no-any-return]
+
+        if not self._is_sse_stream:
+            self.read()
+
+        parsed = self._parse(to=to)
+        if is_given(self._options.post_parser):
+            parsed = self._options.post_parser(parsed)
+
+        if isinstance(parsed, BaseModel):
+            add_request_id(parsed, self.request_id)
+
+        self._parsed_by_type[cache_key] = parsed
+        return cast(R, parsed)
+
+    def read(self) -> bytes:
+        """Read and return the binary response content."""
+        try:
+            return self.http_response.read()
+        except httpx.StreamConsumed as exc:
+            # The default error raised by httpx isn't very
+            # helpful in our case so we re-raise it with
+            # a different error message.
+            raise StreamAlreadyConsumed() from exc
+
+    def text(self) -> str:
+        """Read and decode the response content into a string."""
+        self.read()
+        return self.http_response.text
+
+    def json(self) -> object:
+        """Read and decode the JSON response content."""
+        self.read()
+        return self.http_response.json()
+
+    def close(self) -> None:
+        """Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        self.http_response.close()
+
+    def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]:
+        """
+        A byte-iterator over the decoded response content.
+
+        This automatically handles gzip, deflate and brotli encoded responses.
+        """
+        for chunk in self.http_response.iter_bytes(chunk_size):
+            yield chunk
+
+    def iter_text(self, chunk_size: int | None = None) -> Iterator[str]:
+        """A str-iterator over the decoded response content
+        that handles gzip, deflate, etc. and also detects the content's
+        string encoding.
+        """
+        for chunk in self.http_response.iter_text(chunk_size):
+            yield chunk
+
+    def iter_lines(self) -> Iterator[str]:
+        """Like `iter_text()` but will only yield chunks for each line"""
+        for chunk in self.http_response.iter_lines():
+            yield chunk
+
+
+class AsyncAPIResponse(BaseAPIResponse[R]):
+    @property
+    def request_id(self) -> str | None:
+        return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
+
+    @overload
+    async def parse(self, *, to: type[_T]) -> _T: ...
+
+    @overload
+    async def parse(self) -> R: ...
+
+    async def parse(self, *, to: type[_T] | None = None) -> R | _T:
+        """Returns the rich python representation of this response's data.
+
+        For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
+
+        You can customise the type that the response is parsed into through
+        the `to` argument, e.g.
+
+        ```py
+        from openai import BaseModel
+
+
+        class MyModel(BaseModel):
+            foo: str
+
+
+        obj = response.parse(to=MyModel)
+        print(obj.foo)
+        ```
+
+        We support parsing:
+          - `BaseModel`
+          - `dict`
+          - `list`
+          - `Union`
+          - `str`
+          - `int`
+          - `float`
+          - `httpx.Response`
+        """
+        cache_key = to if to is not None else self._cast_to
+        cached = self._parsed_by_type.get(cache_key)
+        if cached is not None:
+            return cached  # type: ignore[no-any-return]
+
+        if not self._is_sse_stream:
+            await self.read()
+
+        parsed = self._parse(to=to)
+        if is_given(self._options.post_parser):
+            parsed = self._options.post_parser(parsed)
+
+        if isinstance(parsed, BaseModel):
+            add_request_id(parsed, self.request_id)
+
+        self._parsed_by_type[cache_key] = parsed
+        return cast(R, parsed)
+
+    async def read(self) -> bytes:
+        """Read and return the binary response content."""
+        try:
+            return await self.http_response.aread()
+        except httpx.StreamConsumed as exc:
+            # the default error raised by httpx isn't very
+            # helpful in our case so we re-raise it with
+            # a different error message
+            raise StreamAlreadyConsumed() from exc
+
+    async def text(self) -> str:
+        """Read and decode the response content into a string."""
+        await self.read()
+        return self.http_response.text
+
+    async def json(self) -> object:
+        """Read and decode the JSON response content."""
+        await self.read()
+        return self.http_response.json()
+
+    async def close(self) -> None:
+        """Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self.http_response.aclose()
+
+    async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
+        """
+        A byte-iterator over the decoded response content.
+
+        This automatically handles gzip, deflate and brotli encoded responses.
+        """
+        async for chunk in self.http_response.aiter_bytes(chunk_size):
+            yield chunk
+
+    async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
+        """A str-iterator over the decoded response content
+        that handles gzip, deflate, etc. and also detects the content's
+        string encoding.
+        """
+        async for chunk in self.http_response.aiter_text(chunk_size):
+            yield chunk
+
+    async def iter_lines(self) -> AsyncIterator[str]:
+        """Like `iter_text()` but will only yield chunks for each line"""
+        async for chunk in self.http_response.aiter_lines():
+            yield chunk
+
+
+class BinaryAPIResponse(APIResponse[bytes]):
+    """Subclass of APIResponse providing helpers for dealing with binary data.
+
+    Note: If you want to stream the response data instead of eagerly reading it
+    all at once then you should use `.with_streaming_response` when making
+    the API request, e.g. `.with_streaming_response.get_binary_response()`
+    """
+
+    def write_to_file(
+        self,
+        file: str | os.PathLike[str],
+    ) -> None:
+        """Write the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+
+        Note: if you want to stream the data to the file instead of writing
+        all at once then you should use `.with_streaming_response` when making
+        the API request, e.g. `.with_streaming_response.get_binary_response()`
+        """
+        with open(file, mode="wb") as f:
+            for data in self.iter_bytes():
+                f.write(data)
+
+
+class AsyncBinaryAPIResponse(AsyncAPIResponse[bytes]):
+    """Subclass of APIResponse providing helpers for dealing with binary data.
+
+    Note: If you want to stream the response data instead of eagerly reading it
+    all at once then you should use `.with_streaming_response` when making
+    the API request, e.g. `.with_streaming_response.get_binary_response()`
+    """
+
+    async def write_to_file(
+        self,
+        file: str | os.PathLike[str],
+    ) -> None:
+        """Write the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+
+        Note: if you want to stream the data to the file instead of writing
+        all at once then you should use `.with_streaming_response` when making
+        the API request, e.g. `.with_streaming_response.get_binary_response()`
+        """
+        path = anyio.Path(file)
+        async with await path.open(mode="wb") as f:
+            async for data in self.iter_bytes():
+                await f.write(data)
+
+
+class StreamedBinaryAPIResponse(APIResponse[bytes]):
+    def stream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
+        """Streams the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+        """
+        with open(file, mode="wb") as f:
+            for data in self.iter_bytes(chunk_size):
+                f.write(data)
+
+
+class AsyncStreamedBinaryAPIResponse(AsyncAPIResponse[bytes]):
+    async def stream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
+        """Streams the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+        """
+        path = anyio.Path(file)
+        async with await path.open(mode="wb") as f:
+            async for data in self.iter_bytes(chunk_size):
+                await f.write(data)
+
+
+class MissingStreamClassError(TypeError):
+    def __init__(self) -> None:
+        super().__init__(
+            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference",
+        )
+
+
+class StreamAlreadyConsumed(OpenAIError):
+    """
+    Attempted to read or stream content, but the content has already
+    been streamed.
+
+    This can happen if you use a method like `.iter_lines()` and then attempt
+    to read the entire response body afterwards, e.g.
+
+    ```py
+    response = await client.post(...)
+    async for line in response.iter_lines():
+        ...  # do something with `line`
+
+    content = await response.read()
+    # ^ error
+    ```
+
+    If you want this behaviour you'll need to either manually accumulate the response
+    content or call `await response.read()` before iterating over the stream.
+    """
+
+    def __init__(self) -> None:
+        message = (
+            "Attempted to read or stream some content, but the content has "
+            "already been streamed. "
+            "This could be due to attempting to stream the response "
+            "content more than once."
+            "\n\n"
+            "You can fix this by manually accumulating the response content while streaming "
+            "or by calling `.read()` before starting to stream."
+        )
+        super().__init__(message)
+
+
+class ResponseContextManager(Generic[_APIResponseT]):
+    """Context manager for ensuring that a request is not made
+    until it is entered and that the response will always be closed
+    when the context manager exits
+    """
+
+    def __init__(self, request_func: Callable[[], _APIResponseT]) -> None:
+        self._request_func = request_func
+        self.__response: _APIResponseT | None = None
+
+    def __enter__(self) -> _APIResponseT:
+        self.__response = self._request_func()
+        return self.__response
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__response is not None:
+            self.__response.close()
+
+
+class AsyncResponseContextManager(Generic[_AsyncAPIResponseT]):
+    """Context manager for ensuring that a request is not made
+    until it is entered and that the response will always be closed
+    when the context manager exits
+    """
+
+    def __init__(self, api_request: Awaitable[_AsyncAPIResponseT]) -> None:
+        self._api_request = api_request
+        self.__response: _AsyncAPIResponseT | None = None
+
+    async def __aenter__(self) -> _AsyncAPIResponseT:
+        self.__response = await self._api_request
+        return self.__response
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__response is not None:
+            await self.__response.close()
+
+
+def to_streamed_response_wrapper(func: Callable[P, R]) -> Callable[P, ResponseContextManager[APIResponse[R]]]:
+    """Higher order function that takes one of our bound API methods and wraps it
+    to support streaming and returning the raw `APIResponse` object directly.
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[APIResponse[R]]:
+        extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "stream"
+
+        kwargs["extra_headers"] = extra_headers
+
+        make_request = functools.partial(func, *args, **kwargs)
+
+        return ResponseContextManager(cast(Callable[[], APIResponse[R]], make_request))
+
+    return wrapped
+
+
+def async_to_streamed_response_wrapper(
+    func: Callable[P, Awaitable[R]],
+) -> Callable[P, AsyncResponseContextManager[AsyncAPIResponse[R]]]:
+    """Higher order function that takes one of our bound API methods and wraps it
+    to support streaming and returning the raw `APIResponse` object directly.
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[AsyncAPIResponse[R]]:
+        extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "stream"
+
+        kwargs["extra_headers"] = extra_headers
+
+        make_request = func(*args, **kwargs)
+
+        return AsyncResponseContextManager(cast(Awaitable[AsyncAPIResponse[R]], make_request))
+
+    return wrapped
+
+
+def to_custom_streamed_response_wrapper(
+    func: Callable[P, object],
+    response_cls: type[_APIResponseT],
+) -> Callable[P, ResponseContextManager[_APIResponseT]]:
+    """Higher order function that takes one of our bound API methods and an `APIResponse` class
+    and wraps the method to support streaming and returning the given response class directly.
+
+    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[_APIResponseT]:
+        extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "stream"
+        extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls
+
+        kwargs["extra_headers"] = extra_headers
+
+        make_request = functools.partial(func, *args, **kwargs)
+
+        return ResponseContextManager(cast(Callable[[], _APIResponseT], make_request))
+
+    return wrapped
+
+
+def async_to_custom_streamed_response_wrapper(
+    func: Callable[P, Awaitable[object]],
+    response_cls: type[_AsyncAPIResponseT],
+) -> Callable[P, AsyncResponseContextManager[_AsyncAPIResponseT]]:
+    """Higher order function that takes one of our bound API methods and an `APIResponse` class
+    and wraps the method to support streaming and returning the given response class directly.
+
+    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[_AsyncAPIResponseT]:
+        extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "stream"
+        extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls
+
+        kwargs["extra_headers"] = extra_headers
+
+        make_request = func(*args, **kwargs)
+
+        return AsyncResponseContextManager(cast(Awaitable[_AsyncAPIResponseT], make_request))
+
+    return wrapped
+
+
+def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, APIResponse[R]]:
+    """Higher order function that takes one of our bound API methods and wraps it
+    to support returning the raw `APIResponse` object directly.
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> APIResponse[R]:
+        extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "raw"
+
+        kwargs["extra_headers"] = extra_headers
+
+        return cast(APIResponse[R], func(*args, **kwargs))
+
+    return wrapped
+
+
+def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[AsyncAPIResponse[R]]]:
+    """Higher order function that takes one of our bound API methods and wraps it
+    to support returning the raw `APIResponse` object directly.
+    """
+
+    @functools.wraps(func)
+    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncAPIResponse[R]:
+        extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "raw"
+
+        kwargs["extra_headers"] = extra_headers
+
+        return cast(AsyncAPIResponse[R], await func(*args, **kwargs))
+
+    return wrapped
+
+
+def to_custom_raw_response_wrapper(
+    func: Callable[P, object],
+    response_cls: type[_APIResponseT],
+) -> Callable[P, _APIResponseT]:
+    """Higher order function that takes one of our bound API methods and an `APIResponse` class
+    and wraps the method to support returning the given response class directly.
+
+    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> _APIResponseT:
+        extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "raw"
+        extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls
+
+        kwargs["extra_headers"] = extra_headers
+
+        return cast(_APIResponseT, func(*args, **kwargs))
+
+    return wrapped
+
+
+def async_to_custom_raw_response_wrapper(
+    func: Callable[P, Awaitable[object]],
+    response_cls: type[_AsyncAPIResponseT],
+) -> Callable[P, Awaitable[_AsyncAPIResponseT]]:
+    """Higher order function that takes one of our bound API methods and an `APIResponse` class
+    and wraps the method to support returning the given response class directly.
+
+    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
+    """
+
+    @functools.wraps(func)
+    def wrapped(*args: P.args, **kwargs: P.kwargs) -> Awaitable[_AsyncAPIResponseT]:
+        extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
+        extra_headers[RAW_RESPONSE_HEADER] = "raw"
+        extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls
+
+        kwargs["extra_headers"] = extra_headers
+
+        return cast(Awaitable[_AsyncAPIResponseT], func(*args, **kwargs))
+
+    return wrapped
+
+
+def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type:
+    """Given a type like `APIResponse[T]`, returns the generic type variable `T`.
+
+    This also handles the case where a concrete subclass is given, e.g.
+    ```py
+    class MyResponse(APIResponse[bytes]):
+        ...
+
+    extract_response_type(MyResponse) -> bytes
+    ```
+    """
+    return extract_type_var_from_base(
+        typ,
+        generic_bases=cast("tuple[type, ...]", (BaseAPIResponse, APIResponse, AsyncAPIResponse)),
+        index=0,
+    )
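
A hedged end-to-end sketch of how these wrappers surface on the client (it assumes an `OpenAI()` client can be constructed, e.g. with `OPENAI_API_KEY` set):

```py
from openai import OpenAI

client = OpenAI()

# `.with_raw_response` goes through the raw-response wrappers above and hands
# back the response object itself rather than the parsed model.
raw = client.models.with_raw_response.list()
print(raw.headers.get("x-request-id"))
models = raw.parse()  # parsed on demand and cached per target type

# `.with_streaming_response` goes through the streamed-response wrappers and
# returns a context manager so the connection is always released.
with client.models.with_streaming_response.list() as response:
    for line in response.iter_lines():
        print(line)
```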
diff --git a/.venv/lib/python3.12/site-packages/openai/_streaming.py b/.venv/lib/python3.12/site-packages/openai/_streaming.py
new file mode 100644
index 00000000..9cb72ffe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_streaming.py
@@ -0,0 +1,410 @@
+# Note: initially copied from https://github.com/florimondmanca/httpx-sse/blob/master/src/httpx_sse/_decoders.py
+from __future__ import annotations
+
+import json
+import inspect
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
+from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
+
+import httpx
+
+from ._utils import is_mapping, extract_type_var_from_base
+from ._exceptions import APIError
+
+if TYPE_CHECKING:
+    from ._client import OpenAI, AsyncOpenAI
+
+
+_T = TypeVar("_T")
+
+
+class Stream(Generic[_T]):
+    """Provides the core interface to iterate over a synchronous stream response."""
+
+    response: httpx.Response
+
+    _decoder: SSEBytesDecoder
+
+    def __init__(
+        self,
+        *,
+        cast_to: type[_T],
+        response: httpx.Response,
+        client: OpenAI,
+    ) -> None:
+        self.response = response
+        self._cast_to = cast_to
+        self._client = client
+        self._decoder = client._make_sse_decoder()
+        self._iterator = self.__stream__()
+
+    def __next__(self) -> _T:
+        return self._iterator.__next__()
+
+    def __iter__(self) -> Iterator[_T]:
+        for item in self._iterator:
+            yield item
+
+    def _iter_events(self) -> Iterator[ServerSentEvent]:
+        yield from self._decoder.iter_bytes(self.response.iter_bytes())
+
+    def __stream__(self) -> Iterator[_T]:
+        cast_to = cast(Any, self._cast_to)
+        response = self.response
+        process_data = self._client._process_response_data
+        iterator = self._iter_events()
+
+        for sse in iterator:
+            if sse.data.startswith("[DONE]"):
+                break
+
+            if sse.event is None or sse.event.startswith("response."):
+                data = sse.json()
+                if is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data=data, cast_to=cast_to, response=response)
+
+            else:
+                data = sse.json()
+
+                if sse.event == "error" and is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
+
+        # Ensure the entire stream is consumed
+        for _sse in iterator:
+            ...
+
+    def __enter__(self) -> Self:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.close()
+
+    def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        self.response.close()
+
+
+class AsyncStream(Generic[_T]):
+    """Provides the core interface to iterate over an asynchronous stream response."""
+
+    response: httpx.Response
+
+    _decoder: SSEDecoder | SSEBytesDecoder
+
+    def __init__(
+        self,
+        *,
+        cast_to: type[_T],
+        response: httpx.Response,
+        client: AsyncOpenAI,
+    ) -> None:
+        self.response = response
+        self._cast_to = cast_to
+        self._client = client
+        self._decoder = client._make_sse_decoder()
+        self._iterator = self.__stream__()
+
+    async def __anext__(self) -> _T:
+        return await self._iterator.__anext__()
+
+    async def __aiter__(self) -> AsyncIterator[_T]:
+        async for item in self._iterator:
+            yield item
+
+    async def _iter_events(self) -> AsyncIterator[ServerSentEvent]:
+        async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()):
+            yield sse
+
+    async def __stream__(self) -> AsyncIterator[_T]:
+        cast_to = cast(Any, self._cast_to)
+        response = self.response
+        process_data = self._client._process_response_data
+        iterator = self._iter_events()
+
+        async for sse in iterator:
+            if sse.data.startswith("[DONE]"):
+                break
+
+            if sse.event is None or sse.event.startswith("response."):
+                data = sse.json()
+                if is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data=data, cast_to=cast_to, response=response)
+
+            else:
+                data = sse.json()
+
+                if sse.event == "error" and is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
+
+        # Ensure the entire stream is consumed
+        async for _sse in iterator:
+            ...
+
+    async def __aenter__(self) -> Self:
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        await self.close()
+
+    async def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self.response.aclose()
+
+
+class ServerSentEvent:
+    def __init__(
+        self,
+        *,
+        event: str | None = None,
+        data: str | None = None,
+        id: str | None = None,
+        retry: int | None = None,
+    ) -> None:
+        if data is None:
+            data = ""
+
+        self._id = id
+        self._data = data
+        self._event = event or None
+        self._retry = retry
+
+    @property
+    def event(self) -> str | None:
+        return self._event
+
+    @property
+    def id(self) -> str | None:
+        return self._id
+
+    @property
+    def retry(self) -> int | None:
+        return self._retry
+
+    @property
+    def data(self) -> str:
+        return self._data
+
+    def json(self) -> Any:
+        return json.loads(self.data)
+
+    @override
+    def __repr__(self) -> str:
+        return f"ServerSentEvent(event={self.event}, data={self.data}, id={self.id}, retry={self.retry})"
+
+
+class SSEDecoder:
+    _data: list[str]
+    _event: str | None
+    _retry: int | None
+    _last_event_id: str | None
+
+    def __init__(self) -> None:
+        self._event = None
+        self._data = []
+        self._last_event_id = None
+        self._retry = None
+
+    def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
+        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
+        for chunk in self._iter_chunks(iterator):
+            # Split before decoding so splitlines() only uses \r and \n
+            for raw_line in chunk.splitlines():
+                line = raw_line.decode("utf-8")
+                sse = self.decode(line)
+                if sse:
+                    yield sse
+
+    def _iter_chunks(self, iterator: Iterator[bytes]) -> Iterator[bytes]:
+        """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
+        data = b""
+        for chunk in iterator:
+            for line in chunk.splitlines(keepends=True):
+                data += line
+                if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
+                    yield data
+                    data = b""
+        if data:
+            yield data
+
+    async def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
+        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
+        async for chunk in self._aiter_chunks(iterator):
+            # Split before decoding so splitlines() only uses \r and \n
+            for raw_line in chunk.splitlines():
+                line = raw_line.decode("utf-8")
+                sse = self.decode(line)
+                if sse:
+                    yield sse
+
+    async def _aiter_chunks(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
+        """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
+        data = b""
+        async for chunk in iterator:
+            for line in chunk.splitlines(keepends=True):
+                data += line
+                if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
+                    yield data
+                    data = b""
+        if data:
+            yield data
+
+    def decode(self, line: str) -> ServerSentEvent | None:
+        # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation  # noqa: E501
+
+        if not line:
+            if not self._event and not self._data and not self._last_event_id and self._retry is None:
+                return None
+
+            sse = ServerSentEvent(
+                event=self._event,
+                data="\n".join(self._data),
+                id=self._last_event_id,
+                retry=self._retry,
+            )
+
+            # NOTE: as per the SSE spec, do not reset last_event_id.
+            self._event = None
+            self._data = []
+            self._retry = None
+
+            return sse
+
+        if line.startswith(":"):
+            return None
+
+        fieldname, _, value = line.partition(":")
+
+        if value.startswith(" "):
+            value = value[1:]
+
+        if fieldname == "event":
+            self._event = value
+        elif fieldname == "data":
+            self._data.append(value)
+        elif fieldname == "id":
+            if "\0" in value:
+                pass
+            else:
+                self._last_event_id = value
+        elif fieldname == "retry":
+            try:
+                self._retry = int(value)
+            except (TypeError, ValueError):
+                pass
+        else:
+            pass  # Field is ignored.
+
+        return None
+
+
+@runtime_checkable
+class SSEBytesDecoder(Protocol):
+    def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
+        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
+        ...
+
+    def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
+        """Given an async iterator that yields raw binary data, iterate over it & yield every event encountered"""
+        ...
+
+
+def is_stream_class_type(typ: type) -> TypeGuard[type[Stream[object]] | type[AsyncStream[object]]]:
+    """TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`"""
+    origin = get_origin(typ) or typ
+    return inspect.isclass(origin) and issubclass(origin, (Stream, AsyncStream))
+
+
+def extract_stream_chunk_type(
+    stream_cls: type,
+    *,
+    failure_message: str | None = None,
+) -> type:
+    """Given a type like `Stream[T]`, returns the generic type variable `T`.
+
+    This also handles the case where a concrete subclass is given, e.g.
+    ```py
+    class MyStream(Stream[bytes]):
+        ...
+
+    extract_stream_chunk_type(MyStream) -> bytes
+    ```
+    """
+    from ._base_client import Stream, AsyncStream
+
+    return extract_type_var_from_base(
+        stream_cls,
+        index=0,
+        generic_bases=cast("tuple[type, ...]", (Stream, AsyncStream)),
+        failure_message=failure_message,
+    )
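
A minimal sketch of the decoder in isolation; the SSE payload below is made up, but follows the framing the decoder expects (events separated by a blank line):

```py
from openai._streaming import SSEDecoder

decoder = SSEDecoder()
chunks = [b'event: response.delta\ndata: {"text": "hi"}\n\n', b"data: [DONE]\n\n"]

for sse in decoder.iter_bytes(iter(chunks)):
    print(sse.event, sse.data)
# response.delta {"text": "hi"}
# None [DONE]
```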
diff --git a/.venv/lib/python3.12/site-packages/openai/_types.py b/.venv/lib/python3.12/site-packages/openai/_types.py
new file mode 100644
index 00000000..a5cf207a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_types.py
@@ -0,0 +1,219 @@
+from __future__ import annotations
+
+from os import PathLike
+from typing import (
+    IO,
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    List,
+    Type,
+    Tuple,
+    Union,
+    Mapping,
+    TypeVar,
+    Callable,
+    Optional,
+    Sequence,
+)
+from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable
+
+import httpx
+import pydantic
+from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport
+
+if TYPE_CHECKING:
+    from ._models import BaseModel
+    from ._response import APIResponse, AsyncAPIResponse
+    from ._legacy_response import HttpxBinaryResponseContent
+
+Transport = BaseTransport
+AsyncTransport = AsyncBaseTransport
+Query = Mapping[str, object]
+Body = object
+AnyMapping = Mapping[str, object]
+ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
+_T = TypeVar("_T")
+
+
+# Approximates httpx internal ProxiesTypes and RequestFiles types
+# while adding support for `PathLike` instances
+ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]]
+ProxiesTypes = Union[str, Proxy, ProxiesDict]
+if TYPE_CHECKING:
+    Base64FileInput = Union[IO[bytes], PathLike[str]]
+    FileContent = Union[IO[bytes], bytes, PathLike[str]]
+else:
+    Base64FileInput = Union[IO[bytes], PathLike]
+    FileContent = Union[IO[bytes], bytes, PathLike]  # PathLike is not subscriptable in Python 3.8.
+FileTypes = Union[
+    # file (or bytes)
+    FileContent,
+    # (filename, file (or bytes))
+    Tuple[Optional[str], FileContent],
+    # (filename, file (or bytes), content_type)
+    Tuple[Optional[str], FileContent, Optional[str]],
+    # (filename, file (or bytes), content_type, headers)
+    Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],
+]
+RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
+
+# duplicate of the above but without our custom file support
+HttpxFileContent = Union[IO[bytes], bytes]
+HttpxFileTypes = Union[
+    # file (or bytes)
+    HttpxFileContent,
+    # (filename, file (or bytes))
+    Tuple[Optional[str], HttpxFileContent],
+    # (filename, file (or bytes), content_type)
+    Tuple[Optional[str], HttpxFileContent, Optional[str]],
+    # (filename, file (or bytes), content_type, headers)
+    Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]],
+]
+HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]]
+
+# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT
+# where ResponseT includes `None`. In order to support directly
+# passing `None`, overloads would have to be defined for every
+# method that uses `ResponseT` which would lead to an unacceptable
+# amount of code duplication and make it unreadable. See _base_client.py
+# for example usage.
+#
+# This unfortunately means that you will either have
+# to import this type and pass it explicitly:
+#
+# from openai import NoneType
+# client.get('/foo', cast_to=NoneType)
+#
+# or build it yourself:
+#
+# client.get('/foo', cast_to=type(None))
+if TYPE_CHECKING:
+    NoneType: Type[None]
+else:
+    NoneType = type(None)
+
+
+class RequestOptions(TypedDict, total=False):
+    headers: Headers
+    max_retries: int
+    timeout: float | Timeout | None
+    params: Query
+    extra_json: AnyMapping
+    idempotency_key: str
+
+
+# Sentinel class used until PEP 0661 is accepted
+class NotGiven:
+    """
+    A sentinel singleton class used to distinguish omitted keyword arguments
+    from those passed in with the value None (which may have different behavior).
+
+    For example:
+
+    ```py
+    def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
+
+
+    get(timeout=1)  # 1s timeout
+    get(timeout=None)  # No timeout
+    get()  # Default timeout behavior, which may not be statically known at the method definition.
+    ```
+    """
+
+    def __bool__(self) -> Literal[False]:
+        return False
+
+    @override
+    def __repr__(self) -> str:
+        return "NOT_GIVEN"
+
+
+NotGivenOr = Union[_T, NotGiven]
+NOT_GIVEN = NotGiven()
+
+
+class Omit:
+    """In certain situations you need to be able to represent a case where a default value has
+    to be explicitly removed and `None` is not an appropriate substitute, for example:
+
+    ```py
+    # since the default `Content-Type` header is `application/json`, that is what will be sent
+    client.post("/upload/files", files={"file": b"my raw file content"})
+
+    # you can't explicitly override the header as it has to be dynamically generated
+    # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
+    client.post(..., headers={"Content-Type": "multipart/form-data"})
+
+    # instead you can remove the default `application/json` header by passing Omit
+    client.post(..., headers={"Content-Type": Omit()})
+    ```
+    """
+
+    def __bool__(self) -> Literal[False]:
+        return False
+
+
+@runtime_checkable
+class ModelBuilderProtocol(Protocol):
+    @classmethod
+    def build(
+        cls: type[_T],
+        *,
+        response: Response,
+        data: object,
+    ) -> _T: ...
+
+
+Headers = Mapping[str, Union[str, Omit]]
+
+
+class HeadersLikeProtocol(Protocol):
+    def get(self, __key: str) -> str | None: ...
+
+
+HeadersLike = Union[Headers, HeadersLikeProtocol]
+
+ResponseT = TypeVar(
+    "ResponseT",
+    bound=Union[
+        object,
+        str,
+        None,
+        "BaseModel",
+        List[Any],
+        Dict[str, Any],
+        Response,
+        ModelBuilderProtocol,
+        "APIResponse[Any]",
+        "AsyncAPIResponse[Any]",
+        "HttpxBinaryResponseContent",
+    ],
+)
+
+StrBytesIntFloat = Union[str, bytes, int, float]
+
+# Note: copied from Pydantic
+# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79
+IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]]
+
+PostParser = Callable[[Any], Any]
+
+
+@runtime_checkable
+class InheritsGeneric(Protocol):
+    """Represents a type that has inherited from `Generic`
+
+    The `__orig_bases__` property can be used to determine the resolved
+    type variable for a given base class.
+    """
+
+    __orig_bases__: tuple[_GenericAlias]
+
+
+class _GenericAlias(Protocol):
+    __origin__: type[object]
+
+
+class HttpxSendArgs(TypedDict, total=False):
+    auth: httpx.Auth
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/__init__.py b/.venv/lib/python3.12/site-packages/openai/_utils/__init__.py
new file mode 100644
index 00000000..bd01c088
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/__init__.py
@@ -0,0 +1,60 @@
+from ._logs import SensitiveHeadersFilter as SensitiveHeadersFilter
+from ._sync import asyncify as asyncify
+from ._proxy import LazyProxy as LazyProxy
+from ._utils import (
+    flatten as flatten,
+    is_dict as is_dict,
+    is_list as is_list,
+    is_given as is_given,
+    is_tuple as is_tuple,
+    json_safe as json_safe,
+    lru_cache as lru_cache,
+    is_mapping as is_mapping,
+    is_tuple_t as is_tuple_t,
+    parse_date as parse_date,
+    is_iterable as is_iterable,
+    is_sequence as is_sequence,
+    coerce_float as coerce_float,
+    is_mapping_t as is_mapping_t,
+    removeprefix as removeprefix,
+    removesuffix as removesuffix,
+    extract_files as extract_files,
+    is_sequence_t as is_sequence_t,
+    required_args as required_args,
+    coerce_boolean as coerce_boolean,
+    coerce_integer as coerce_integer,
+    file_from_path as file_from_path,
+    parse_datetime as parse_datetime,
+    is_azure_client as is_azure_client,
+    strip_not_given as strip_not_given,
+    deepcopy_minimal as deepcopy_minimal,
+    get_async_library as get_async_library,
+    maybe_coerce_float as maybe_coerce_float,
+    get_required_header as get_required_header,
+    maybe_coerce_boolean as maybe_coerce_boolean,
+    maybe_coerce_integer as maybe_coerce_integer,
+    is_async_azure_client as is_async_azure_client,
+)
+from ._typing import (
+    is_list_type as is_list_type,
+    is_union_type as is_union_type,
+    extract_type_arg as extract_type_arg,
+    is_iterable_type as is_iterable_type,
+    is_required_type as is_required_type,
+    is_annotated_type as is_annotated_type,
+    is_type_alias_type as is_type_alias_type,
+    strip_annotated_type as strip_annotated_type,
+    extract_type_var_from_base as extract_type_var_from_base,
+)
+from ._streams import consume_sync_iterator as consume_sync_iterator, consume_async_iterator as consume_async_iterator
+from ._transform import (
+    PropertyInfo as PropertyInfo,
+    transform as transform,
+    async_transform as async_transform,
+    maybe_transform as maybe_transform,
+    async_maybe_transform as async_maybe_transform,
+)
+from ._reflection import (
+    function_has_argument as function_has_argument,
+    assert_signatures_in_sync as assert_signatures_in_sync,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_logs.py b/.venv/lib/python3.12/site-packages/openai/_utils/_logs.py
new file mode 100644
index 00000000..37694693
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_logs.py
@@ -0,0 +1,42 @@
+import os
+import logging
+from typing_extensions import override
+
+from ._utils import is_dict
+
+logger: logging.Logger = logging.getLogger("openai")
+httpx_logger: logging.Logger = logging.getLogger("httpx")
+
+
+SENSITIVE_HEADERS = {"api-key", "authorization"}
+
+
+def _basic_config() -> None:
+    # e.g. [2023-10-05 14:12:26 - openai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
+    logging.basicConfig(
+        format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+    )
+
+
+def setup_logging() -> None:
+    env = os.environ.get("OPENAI_LOG")
+    if env == "debug":
+        _basic_config()
+        logger.setLevel(logging.DEBUG)
+        httpx_logger.setLevel(logging.DEBUG)
+    elif env == "info":
+        _basic_config()
+        logger.setLevel(logging.INFO)
+        httpx_logger.setLevel(logging.INFO)
+
+
+class SensitiveHeadersFilter(logging.Filter):
+    @override
+    def filter(self, record: logging.LogRecord) -> bool:
+        if is_dict(record.args) and "headers" in record.args and is_dict(record.args["headers"]):
+            headers = record.args["headers"] = {**record.args["headers"]}
+            for header in headers:
+                if str(header).lower() in SENSITIVE_HEADERS:
+                    headers[header] = "<redacted>"
+        return True
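+
+
+# Editor's note: a minimal, hypothetical usage sketch (not part of the upstream
+# module) showing how the filter above can be attached so that any `headers`
+# mapping passed as a logging argument is redacted before being emitted:
+#
+#     import logging
+#
+#     handler = logging.StreamHandler()
+#     handler.addFilter(SensitiveHeadersFilter())
+#     logging.getLogger("openai").addHandler(handler)
+#     logging.getLogger("openai").setLevel(logging.DEBUG)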
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_proxy.py b/.venv/lib/python3.12/site-packages/openai/_utils/_proxy.py
new file mode 100644
index 00000000..ffd883e9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_proxy.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import Generic, TypeVar, Iterable, cast
+from typing_extensions import override
+
+T = TypeVar("T")
+
+
+class LazyProxy(Generic[T], ABC):
+    """Implements data methods to pretend that an instance is another instance.
+
+    This includes forwarding attribute access and other methods.
+    """
+
+    # Note: we have to special case proxies that themselves return proxies
+    # to support using a proxy as a catch-all for any random access, e.g. `proxy.foo.bar.baz`
+
+    def __getattr__(self, attr: str) -> object:
+        proxied = self.__get_proxied__()
+        if isinstance(proxied, LazyProxy):
+            return proxied  # pyright: ignore
+        return getattr(proxied, attr)
+
+    @override
+    def __repr__(self) -> str:
+        proxied = self.__get_proxied__()
+        if isinstance(proxied, LazyProxy):
+            return proxied.__class__.__name__
+        return repr(self.__get_proxied__())
+
+    @override
+    def __str__(self) -> str:
+        proxied = self.__get_proxied__()
+        if isinstance(proxied, LazyProxy):
+            return proxied.__class__.__name__
+        return str(proxied)
+
+    @override
+    def __dir__(self) -> Iterable[str]:
+        proxied = self.__get_proxied__()
+        if isinstance(proxied, LazyProxy):
+            return []
+        return proxied.__dir__()
+
+    @property  # type: ignore
+    @override
+    def __class__(self) -> type:  # pyright: ignore
+        proxied = self.__get_proxied__()
+        if issubclass(type(proxied), LazyProxy):
+            return type(proxied)
+        return proxied.__class__
+
+    def __get_proxied__(self) -> T:
+        return self.__load__()
+
+    def __as_proxied__(self) -> T:
+        """Helper method that returns the current proxy, typed as the loaded object"""
+        return cast(T, self)
+
+    @abstractmethod
+    def __load__(self) -> T: ...
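+
+
+# Editor's note: a minimal, hypothetical sketch (not upstream code) of a concrete
+# subclass; `__load__` supplies the real object and attribute access is forwarded:
+#
+#     class LazyList(LazyProxy[list]):
+#         def __load__(self) -> list:
+#             return [1, 2, 3]
+#
+#     proxy = LazyList()
+#     proxy.count(2)  # loads the underlying list, then forwards the call -> 1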
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_reflection.py b/.venv/lib/python3.12/site-packages/openai/_utils/_reflection.py
new file mode 100644
index 00000000..bdaca29e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_reflection.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import inspect
+from typing import Any, Callable
+
+
+def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool:
+    """Returns whether or not the given function has a specific parameter"""
+    sig = inspect.signature(func)
+    return arg_name in sig.parameters
+
+
+def assert_signatures_in_sync(
+    source_func: Callable[..., Any],
+    check_func: Callable[..., Any],
+    *,
+    exclude_params: set[str] = set(),
+    description: str = "",
+) -> None:
+    """Ensure that the signature of the second function matches the first."""
+
+    check_sig = inspect.signature(check_func)
+    source_sig = inspect.signature(source_func)
+
+    errors: list[str] = []
+
+    for name, source_param in source_sig.parameters.items():
+        if name in exclude_params:
+            continue
+
+        custom_param = check_sig.parameters.get(name)
+        if not custom_param:
+            errors.append(f"the `{name}` param is missing")
+            continue
+
+        if custom_param.annotation != source_param.annotation:
+            errors.append(
+                f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}"
+            )
+            continue
+
+    if errors:
+        raise AssertionError(
+            f"{len(errors)} errors encountered when comparing signatures{description}:\n\n" + "\n\n".join(errors)
+        )
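+
+
+# Editor's note: an illustrative sketch (not upstream code) of how the assertion
+# behaves when the second signature drifts from the first:
+#
+#     def source(a: int, b: str) -> None: ...
+#     def check(a: int) -> None: ...
+#
+#     assert_signatures_in_sync(source, check)
+#     # -> AssertionError: 1 errors encountered when comparing signatures: the `b` param is missing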
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_streams.py b/.venv/lib/python3.12/site-packages/openai/_utils/_streams.py
new file mode 100644
index 00000000..f4a0208f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_streams.py
@@ -0,0 +1,12 @@
+from typing import Any
+from typing_extensions import Iterator, AsyncIterator
+
+
+def consume_sync_iterator(iterator: Iterator[Any]) -> None:
+    for _ in iterator:
+        ...
+
+
+async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None:
+    async for _ in iterator:
+        ...
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_sync.py b/.venv/lib/python3.12/site-packages/openai/_utils/_sync.py
new file mode 100644
index 00000000..ad7ec71b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_sync.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import sys
+import asyncio
+import functools
+import contextvars
+from typing import Any, TypeVar, Callable, Awaitable
+from typing_extensions import ParamSpec
+
+import anyio
+import sniffio
+import anyio.to_thread
+
+T_Retval = TypeVar("T_Retval")
+T_ParamSpec = ParamSpec("T_ParamSpec")
+
+
+if sys.version_info >= (3, 9):
+    _asyncio_to_thread = asyncio.to_thread
+else:
+    # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
+    # for Python 3.8 support
+    async def _asyncio_to_thread(
+        func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
+    ) -> Any:
+        """Asynchronously run function *func* in a separate thread.
+
+        Any *args and **kwargs supplied for this function are directly passed
+        to *func*. Also, the current :class:`contextvars.Context` is propagated,
+        allowing context variables from the main thread to be accessed in the
+        separate thread.
+
+        Returns a coroutine that can be awaited to get the eventual result of *func*.
+        """
+        loop = asyncio.events.get_running_loop()
+        ctx = contextvars.copy_context()
+        func_call = functools.partial(ctx.run, func, *args, **kwargs)
+        return await loop.run_in_executor(None, func_call)
+
+
+async def to_thread(
+    func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
+) -> T_Retval:
+    if sniffio.current_async_library() == "asyncio":
+        return await _asyncio_to_thread(func, *args, **kwargs)
+
+    return await anyio.to_thread.run_sync(
+        functools.partial(func, *args, **kwargs),
+    )
+
+
+# inspired by `asyncer`, https://github.com/tiangolo/asyncer
+def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
+    """
+    Take a blocking function and create an async one that receives the same
+    positional and keyword arguments. For Python 3.9 and above, it uses
+    asyncio.to_thread to run the function in a separate thread. For Python
+    3.8, it uses a locally defined copy of the asyncio.to_thread function,
+    which was introduced in Python 3.9.
+
+    Usage:
+
+    ```python
+    def blocking_func(arg1, arg2, kwarg1=None):
+        # blocking code
+        return result
+
+
+    result = await asyncify(blocking_func)(arg1, arg2, kwarg1=value1)
+    ```
+
+    ## Arguments
+
+    `function`: a blocking regular callable (e.g. a function)
+
+    ## Return
+
+    An async function that takes the same positional and keyword arguments as the
+    original one, that when called runs the same original function in a thread worker
+    and returns the result.
+    """
+
+    async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
+        return await to_thread(function, *args, **kwargs)
+
+    return wrapper
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_transform.py b/.venv/lib/python3.12/site-packages/openai/_utils/_transform.py
new file mode 100644
index 00000000..18afd9d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_transform.py
@@ -0,0 +1,402 @@
+from __future__ import annotations
+
+import io
+import base64
+import pathlib
+from typing import Any, Mapping, TypeVar, cast
+from datetime import date, datetime
+from typing_extensions import Literal, get_args, override, get_type_hints
+
+import anyio
+import pydantic
+
+from ._utils import (
+    is_list,
+    is_mapping,
+    is_iterable,
+)
+from .._files import is_base64_file_input
+from ._typing import (
+    is_list_type,
+    is_union_type,
+    extract_type_arg,
+    is_iterable_type,
+    is_required_type,
+    is_annotated_type,
+    strip_annotated_type,
+)
+from .._compat import get_origin, model_dump, is_typeddict
+
+_T = TypeVar("_T")
+
+
+# TODO: support for drilling globals() and locals()
+# TODO: ensure works correctly with forward references in all cases
+
+
+PropertyFormat = Literal["iso8601", "base64", "custom"]
+
+
+class PropertyInfo:
+    """Metadata class to be used in Annotated types to provide information about a given type.
+
+    For example:
+
+    class MyParams(TypedDict):
+        account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
+
+    This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
+    """
+
+    alias: str | None
+    format: PropertyFormat | None
+    format_template: str | None
+    discriminator: str | None
+
+    def __init__(
+        self,
+        *,
+        alias: str | None = None,
+        format: PropertyFormat | None = None,
+        format_template: str | None = None,
+        discriminator: str | None = None,
+    ) -> None:
+        self.alias = alias
+        self.format = format
+        self.format_template = format_template
+        self.discriminator = discriminator
+
+    @override
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')"
+
+
+def maybe_transform(
+    data: object,
+    expected_type: object,
+) -> Any | None:
+    """Wrapper over `transform()` that allows `None` to be passed.
+
+    See `transform()` for more details.
+    """
+    if data is None:
+        return None
+    return transform(data, expected_type)
+
+
+# Wrapper over _transform_recursive providing fake types
+def transform(
+    data: _T,
+    expected_type: object,
+) -> _T:
+    """Transform dictionaries based off of type information from the given type, for example:
+
+    ```py
+    class Params(TypedDict, total=False):
+        card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]
+
+
+    transformed = transform({"card_id": "<my card ID>"}, Params)
+    # {'cardID': '<my card ID>'}
+    ```
+
+    Any keys / data that do not have type information given will be included as-is.
+
+    It should be noted that the transformations that this function does are not represented in the type system.
+    """
+    transformed = _transform_recursive(data, annotation=cast(type, expected_type))
+    return cast(_T, transformed)
+
+
+def _get_annotated_type(type_: type) -> type | None:
+    """If the given type is an `Annotated` type then it is returned, if not `None` is returned.
+
+    This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
+    """
+    if is_required_type(type_):
+        # Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
+        type_ = get_args(type_)[0]
+
+    if is_annotated_type(type_):
+        return type_
+
+    return None
+
+
+def _maybe_transform_key(key: str, type_: type) -> str:
+    """Transform the given `data` based on the annotations provided in `type_`.
+
+    Note: this function only looks at `Annotated` types that contain `PropertyInfo` metadata.
+    """
+    annotated_type = _get_annotated_type(type_)
+    if annotated_type is None:
+        # no `Annotated` definition for this type, no transformation needed
+        return key
+
+    # ignore the first argument as it is the actual type
+    annotations = get_args(annotated_type)[1:]
+    for annotation in annotations:
+        if isinstance(annotation, PropertyInfo) and annotation.alias is not None:
+            return annotation.alias
+
+    return key
+
+
+def _transform_recursive(
+    data: object,
+    *,
+    annotation: type,
+    inner_type: type | None = None,
+) -> object:
+    """Transform the given data against the expected type.
+
+    Args:
+        annotation: The direct type annotation given to the particular piece of data.
+            This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
+
+        inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
+            is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
+            the list can be transformed using the metadata from the container type.
+
+            Defaults to the same value as the `annotation` argument.
+    """
+    if inner_type is None:
+        inner_type = annotation
+
+    stripped_type = strip_annotated_type(inner_type)
+    origin = get_origin(stripped_type) or stripped_type
+    if is_typeddict(stripped_type) and is_mapping(data):
+        return _transform_typeddict(data, stripped_type)
+
+    if origin == dict and is_mapping(data):
+        items_type = get_args(stripped_type)[1]
+        return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
+
+    if (
+        # List[T]
+        (is_list_type(stripped_type) and is_list(data))
+        # Iterable[T]
+        or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+    ):
+        # dicts are technically iterable, but iterating over one only yields its keys, which is not
+        # usually what is intended, so we don't transform it.
+        if isinstance(data, dict):
+            return cast(object, data)
+
+        inner_type = extract_type_arg(stripped_type, 0)
+        return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
+
+    if is_union_type(stripped_type):
+        # For union types we run the transformation against all subtypes to ensure that everything is transformed.
+        #
+        # TODO: there may be edge cases where the same normalized field name will transform to two different names
+        # in different subtypes.
+        for subtype in get_args(stripped_type):
+            data = _transform_recursive(data, annotation=annotation, inner_type=subtype)
+        return data
+
+    if isinstance(data, pydantic.BaseModel):
+        return model_dump(data, exclude_unset=True, mode="json")
+
+    annotated_type = _get_annotated_type(annotation)
+    if annotated_type is None:
+        return data
+
+    # ignore the first argument as it is the actual type
+    annotations = get_args(annotated_type)[1:]
+    for annotation in annotations:
+        if isinstance(annotation, PropertyInfo) and annotation.format is not None:
+            return _format_data(data, annotation.format, annotation.format_template)
+
+    return data
+
+
+def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
+    if isinstance(data, (date, datetime)):
+        if format_ == "iso8601":
+            return data.isoformat()
+
+        if format_ == "custom" and format_template is not None:
+            return data.strftime(format_template)
+
+    if format_ == "base64" and is_base64_file_input(data):
+        binary: str | bytes | None = None
+
+        if isinstance(data, pathlib.Path):
+            binary = data.read_bytes()
+        elif isinstance(data, io.IOBase):
+            binary = data.read()
+
+            if isinstance(binary, str):  # type: ignore[unreachable]
+                binary = binary.encode()
+
+        if not isinstance(binary, bytes):
+            raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
+
+        return base64.b64encode(binary).decode("ascii")
+
+    return data
+
+
+def _transform_typeddict(
+    data: Mapping[str, object],
+    expected_type: type,
+) -> Mapping[str, object]:
+    result: dict[str, object] = {}
+    annotations = get_type_hints(expected_type, include_extras=True)
+    for key, value in data.items():
+        type_ = annotations.get(key)
+        if type_ is None:
+            # we do not have a type annotation for this field, leave it as is
+            result[key] = value
+        else:
+            result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_)
+    return result
+
+
+async def async_maybe_transform(
+    data: object,
+    expected_type: object,
+) -> Any | None:
+    """Wrapper over `async_transform()` that allows `None` to be passed.
+
+    See `async_transform()` for more details.
+    """
+    if data is None:
+        return None
+    return await async_transform(data, expected_type)
+
+
+async def async_transform(
+    data: _T,
+    expected_type: object,
+) -> _T:
+    """Transform dictionaries based off of type information from the given type, for example:
+
+    ```py
+    class Params(TypedDict, total=False):
+        card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]
+
+
+    transformed = await async_transform({"card_id": "<my card ID>"}, Params)
+    # {'cardID': '<my card ID>'}
+    ```
+
+    Any keys / data that do not have type information given will be included as-is.
+
+    It should be noted that the transformations that this function does are not represented in the type system.
+    """
+    transformed = await _async_transform_recursive(data, annotation=cast(type, expected_type))
+    return cast(_T, transformed)
+
+
+async def _async_transform_recursive(
+    data: object,
+    *,
+    annotation: type,
+    inner_type: type | None = None,
+) -> object:
+    """Transform the given data against the expected type.
+
+    Args:
+        annotation: The direct type annotation given to the particular piece of data.
+            This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
+
+        inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
+            is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
+            the list can be transformed using the metadata from the container type.
+
+            Defaults to the same value as the `annotation` argument.
+    """
+    if inner_type is None:
+        inner_type = annotation
+
+    stripped_type = strip_annotated_type(inner_type)
+    origin = get_origin(stripped_type) or stripped_type
+    if is_typeddict(stripped_type) and is_mapping(data):
+        return await _async_transform_typeddict(data, stripped_type)
+
+    if origin == dict and is_mapping(data):
+        items_type = get_args(stripped_type)[1]
+        return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
+
+    if (
+        # List[T]
+        (is_list_type(stripped_type) and is_list(data))
+        # Iterable[T]
+        or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+    ):
+        # dicts are technically iterable, but iterating over one only yields its keys, which is not
+        # usually what is intended, so we don't transform it.
+        if isinstance(data, dict):
+            return cast(object, data)
+
+        inner_type = extract_type_arg(stripped_type, 0)
+        return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
+
+    if is_union_type(stripped_type):
+        # For union types we run the transformation against all subtypes to ensure that everything is transformed.
+        #
+        # TODO: there may be edge cases where the same normalized field name will transform to two different names
+        # in different subtypes.
+        for subtype in get_args(stripped_type):
+            data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype)
+        return data
+
+    if isinstance(data, pydantic.BaseModel):
+        return model_dump(data, exclude_unset=True, mode="json")
+
+    annotated_type = _get_annotated_type(annotation)
+    if annotated_type is None:
+        return data
+
+    # ignore the first argument as it is the actual type
+    annotations = get_args(annotated_type)[1:]
+    for annotation in annotations:
+        if isinstance(annotation, PropertyInfo) and annotation.format is not None:
+            return await _async_format_data(data, annotation.format, annotation.format_template)
+
+    return data
+
+
+async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
+    if isinstance(data, (date, datetime)):
+        if format_ == "iso8601":
+            return data.isoformat()
+
+        if format_ == "custom" and format_template is not None:
+            return data.strftime(format_template)
+
+    if format_ == "base64" and is_base64_file_input(data):
+        binary: str | bytes | None = None
+
+        if isinstance(data, pathlib.Path):
+            binary = await anyio.Path(data).read_bytes()
+        elif isinstance(data, io.IOBase):
+            binary = data.read()
+
+            if isinstance(binary, str):  # type: ignore[unreachable]
+                binary = binary.encode()
+
+        if not isinstance(binary, bytes):
+            raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
+
+        return base64.b64encode(binary).decode("ascii")
+
+    return data
+
+
+async def _async_transform_typeddict(
+    data: Mapping[str, object],
+    expected_type: type,
+) -> Mapping[str, object]:
+    result: dict[str, object] = {}
+    annotations = get_type_hints(expected_type, include_extras=True)
+    for key, value in data.items():
+        type_ = annotations.get(key)
+        if type_ is None:
+            # we do not have a type annotation for this field, leave it as is
+            result[key] = value
+        else:
+            result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_)
+    return result
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_typing.py b/.venv/lib/python3.12/site-packages/openai/_utils/_typing.py
new file mode 100644
index 00000000..278749b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_typing.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+import sys
+import typing
+import typing_extensions
+from typing import Any, TypeVar, Iterable, cast
+from collections import abc as _c_abc
+from typing_extensions import (
+    TypeIs,
+    Required,
+    Annotated,
+    get_args,
+    get_origin,
+)
+
+from .._types import InheritsGeneric
+from .._compat import is_union as _is_union
+
+
+def is_annotated_type(typ: type) -> bool:
+    return get_origin(typ) == Annotated
+
+
+def is_list_type(typ: type) -> bool:
+    return (get_origin(typ) or typ) == list
+
+
+def is_iterable_type(typ: type) -> bool:
+    """If the given type is `typing.Iterable[T]`"""
+    origin = get_origin(typ) or typ
+    return origin == Iterable or origin == _c_abc.Iterable
+
+
+def is_union_type(typ: type) -> bool:
+    return _is_union(get_origin(typ))
+
+
+def is_required_type(typ: type) -> bool:
+    return get_origin(typ) == Required
+
+
+def is_typevar(typ: type) -> bool:
+    # type ignore is required because type checkers
+    # think this expression will always return False
+    return type(typ) == TypeVar  # type: ignore
+
+
+_TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,)
+if sys.version_info >= (3, 12):
+    _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType)
+
+
+def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]:
+    """Return whether the provided argument is an instance of `TypeAliasType`.
+
+    ```python
+    type Int = int
+    is_type_alias_type(Int)
+    # > True
+    Str = TypeAliasType("Str", str)
+    is_type_alias_type(Str)
+    # > True
+    ```
+    """
+    return isinstance(tp, _TYPE_ALIAS_TYPES)
+
+
+# Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]]
+def strip_annotated_type(typ: type) -> type:
+    if is_required_type(typ) or is_annotated_type(typ):
+        return strip_annotated_type(cast(type, get_args(typ)[0]))
+
+    return typ
+
+
+def extract_type_arg(typ: type, index: int) -> type:
+    args = get_args(typ)
+    try:
+        return cast(type, args[index])
+    except IndexError as err:
+        raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err
+
+
+def extract_type_var_from_base(
+    typ: type,
+    *,
+    generic_bases: tuple[type, ...],
+    index: int,
+    failure_message: str | None = None,
+) -> type:
+    """Given a type like `Foo[T]`, returns the generic type variable `T`.
+
+    This also handles the case where a concrete subclass is given, e.g.
+    ```py
+    class MyResponse(Foo[bytes]):
+        ...
+
+    extract_type_var(MyResponse, bases=(Foo,), index=0) -> bytes
+    ```
+
+    And where a generic subclass is given:
+    ```py
+    _T = TypeVar('_T')
+    class MyResponse(Foo[_T]):
+        ...
+
+    extract_type_var(MyResponse[bytes], bases=(Foo,), index=0) -> bytes
+    ```
+    """
+    cls = cast(object, get_origin(typ) or typ)
+    if cls in generic_bases:
+        # we're given the class directly
+        return extract_type_arg(typ, index)
+
+    # if a subclass is given
+    # ---
+    # this is needed as __orig_bases__ is not present in the typeshed stubs
+    # because it is intended to be for internal use only, however there does
+    # not seem to be a way to resolve generic TypeVars for inherited subclasses
+    # without using it.
+    if isinstance(cls, InheritsGeneric):
+        target_base_class: Any | None = None
+        for base in cls.__orig_bases__:
+            if base.__origin__ in generic_bases:
+                target_base_class = base
+                break
+
+        if target_base_class is None:
+            raise RuntimeError(
+                "Could not find the generic base class;\n"
+                "This should never happen;\n"
+                f"Does {cls} inherit from one of {generic_bases} ?"
+            )
+
+        extracted = extract_type_arg(target_base_class, index)
+        if is_typevar(extracted):
+            # If the extracted type argument is itself a type variable
+            # then that means the subclass itself is generic, so we have
+            # to resolve the type argument from the class itself, not
+            # the base class.
+            #
+            # Note: if there is more than 1 type argument, the subclass could
+            # change the ordering of the type arguments, this is not currently
+            # supported.
+            return extract_type_arg(typ, index)
+
+        return extracted
+
+    raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}")
diff --git a/.venv/lib/python3.12/site-packages/openai/_utils/_utils.py b/.venv/lib/python3.12/site-packages/openai/_utils/_utils.py
new file mode 100644
index 00000000..d6734e6b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_utils/_utils.py
@@ -0,0 +1,430 @@
+from __future__ import annotations
+
+import os
+import re
+import inspect
+import functools
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Tuple,
+    Mapping,
+    TypeVar,
+    Callable,
+    Iterable,
+    Sequence,
+    cast,
+    overload,
+)
+from pathlib import Path
+from datetime import date, datetime
+from typing_extensions import TypeGuard
+
+import sniffio
+
+from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
+from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
+
+_T = TypeVar("_T")
+_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
+_MappingT = TypeVar("_MappingT", bound=Mapping[str, object])
+_SequenceT = TypeVar("_SequenceT", bound=Sequence[object])
+CallableT = TypeVar("CallableT", bound=Callable[..., Any])
+
+if TYPE_CHECKING:
+    from ..lib.azure import AzureOpenAI, AsyncAzureOpenAI
+
+
+def flatten(t: Iterable[Iterable[_T]]) -> list[_T]:
+    return [item for sublist in t for item in sublist]
+
+
+def extract_files(
+    # TODO: this needs to take Dict but variance issues.....
+    # create protocol type ?
+    query: Mapping[str, object],
+    *,
+    paths: Sequence[Sequence[str]],
+) -> list[tuple[str, FileTypes]]:
+    """Recursively extract files from the given dictionary based on specified paths.
+
+    A path may look like this ['foo', 'files', '<array>', 'data'].
+
+    Note: this mutates the given dictionary.
+    """
+    files: list[tuple[str, FileTypes]] = []
+    for path in paths:
+        files.extend(_extract_items(query, path, index=0, flattened_key=None))
+    return files
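+
+
+# Editor's note: an illustrative sketch (not upstream code); the matched entry is
+# popped out of the mapping and returned under a flattened multipart key:
+#
+#     body = {"image": {"file": b"raw bytes"}, "prompt": "hi"}
+#     extract_files(body, paths=[["image", "file"]])
+#     # -> [('image[file]', b'raw bytes')]; `body` is now {"image": {}, "prompt": "hi"}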
+
+
+def _extract_items(
+    obj: object,
+    path: Sequence[str],
+    *,
+    index: int,
+    flattened_key: str | None,
+) -> list[tuple[str, FileTypes]]:
+    try:
+        key = path[index]
+    except IndexError:
+        if isinstance(obj, NotGiven):
+            # no value was provided - we can safely ignore
+            return []
+
+        # cyclical import
+        from .._files import assert_is_file_content
+
+        # We have exhausted the path, return the entry we found.
+        assert_is_file_content(obj, key=flattened_key)
+        assert flattened_key is not None
+        return [(flattened_key, cast(FileTypes, obj))]
+
+    index += 1
+    if is_dict(obj):
+        try:
+            # We are at the last entry in the path so we must remove the field
+            if len(path) == index:
+                item = obj.pop(key)
+            else:
+                item = obj[key]
+        except KeyError:
+            # Key was not present in the dictionary, this is not indicative of an error
+            # as the given path may not point to a required field. We also do not want
+            # to enforce required fields as the API may differ from the spec in some cases.
+            return []
+        if flattened_key is None:
+            flattened_key = key
+        else:
+            flattened_key += f"[{key}]"
+        return _extract_items(
+            item,
+            path,
+            index=index,
+            flattened_key=flattened_key,
+        )
+    elif is_list(obj):
+        if key != "<array>":
+            return []
+
+        return flatten(
+            [
+                _extract_items(
+                    item,
+                    path,
+                    index=index,
+                    flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
+                )
+                for item in obj
+            ]
+        )
+
+    # Something unexpected was passed, just ignore it.
+    return []
+
+
+def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]:
+    return not isinstance(obj, NotGiven)
+
+
+# Type safe methods for narrowing types with TypeVars.
+# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
+# however this cause Pyright to rightfully report errors. As we know we don't
+# care about the contained types we can safely use `object` in it's place.
+#
+# There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
+# `is_*` is for when you're dealing with an unknown input
+# `is_*_t` is for when you're narrowing a known union type to a specific subset
+
+
+def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]:
+    return isinstance(obj, tuple)
+
+
+def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]:
+    return isinstance(obj, tuple)
+
+
+def is_sequence(obj: object) -> TypeGuard[Sequence[object]]:
+    return isinstance(obj, Sequence)
+
+
+def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]:
+    return isinstance(obj, Sequence)
+
+
+def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]:
+    return isinstance(obj, Mapping)
+
+
+def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]:
+    return isinstance(obj, Mapping)
+
+
+def is_dict(obj: object) -> TypeGuard[dict[object, object]]:
+    return isinstance(obj, dict)
+
+
+def is_list(obj: object) -> TypeGuard[list[object]]:
+    return isinstance(obj, list)
+
+
+def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
+    return isinstance(obj, Iterable)
+
+
+def deepcopy_minimal(item: _T) -> _T:
+    """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
+
+    - mappings, e.g. `dict`
+    - list
+
+    This is done for performance reasons.
+    """
+    if is_mapping(item):
+        return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
+    if is_list(item):
+        return cast(_T, [deepcopy_minimal(entry) for entry in item])
+    return item
+
+
+# copied from https://github.com/Rapptz/RoboDanny
+def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
+    size = len(seq)
+    if size == 0:
+        return ""
+
+    if size == 1:
+        return seq[0]
+
+    if size == 2:
+        return f"{seq[0]} {final} {seq[1]}"
+
+    return delim.join(seq[:-1]) + f" {final} {seq[-1]}"
+
+
+def quote(string: str) -> str:
+    """Add single quotation marks around the given string. Does *not* do any escaping."""
+    return f"'{string}'"
+
+
+def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
+    """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function.
+
+    Useful for enforcing runtime validation of overloaded functions.
+
+    Example usage:
+    ```py
+    @overload
+    def foo(*, a: str) -> str: ...
+
+
+    @overload
+    def foo(*, b: bool) -> str: ...
+
+
+    # This enforces the same constraints that a static type checker would
+    # i.e. that either a or b must be passed to the function
+    @required_args(["a"], ["b"])
+    def foo(*, a: str | None = None, b: bool | None = None) -> str: ...
+    ```
+    """
+
+    def inner(func: CallableT) -> CallableT:
+        params = inspect.signature(func).parameters
+        positional = [
+            name
+            for name, param in params.items()
+            if param.kind
+            in {
+                param.POSITIONAL_ONLY,
+                param.POSITIONAL_OR_KEYWORD,
+            }
+        ]
+
+        @functools.wraps(func)
+        def wrapper(*args: object, **kwargs: object) -> object:
+            given_params: set[str] = set()
+            for i, _ in enumerate(args):
+                try:
+                    given_params.add(positional[i])
+                except IndexError:
+                    raise TypeError(
+                        f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given"
+                    ) from None
+
+            for key in kwargs.keys():
+                given_params.add(key)
+
+            for variant in variants:
+                matches = all((param in given_params for param in variant))
+                if matches:
+                    break
+            else:  # no break
+                if len(variants) > 1:
+                    variations = human_join(
+                        ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants]
+                    )
+                    msg = f"Missing required arguments; Expected either {variations} arguments to be given"
+                else:
+                    assert len(variants) > 0
+
+                    # TODO: this error message is not deterministic
+                    missing = list(set(variants[0]) - given_params)
+                    if len(missing) > 1:
+                        msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}"
+                    else:
+                        msg = f"Missing required argument: {quote(missing[0])}"
+                raise TypeError(msg)
+            return func(*args, **kwargs)
+
+        return wrapper  # type: ignore
+
+    return inner
+
+
+_K = TypeVar("_K")
+_V = TypeVar("_V")
+
+
+@overload
+def strip_not_given(obj: None) -> None: ...
+
+
+@overload
+def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ...
+
+
+@overload
+def strip_not_given(obj: object) -> object: ...
+
+
+def strip_not_given(obj: object | None) -> object:
+    """Remove all top-level keys where their values are instances of `NotGiven`"""
+    if obj is None:
+        return None
+
+    if not is_mapping(obj):
+        return obj
+
+    return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)}
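+
+
+# Editor's note: illustrative only (not upstream code):
+#
+#     strip_not_given({"a": 1, "b": NotGiven()})  # -> {'a': 1}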
+
+
+def coerce_integer(val: str) -> int:
+    return int(val, base=10)
+
+
+def coerce_float(val: str) -> float:
+    return float(val)
+
+
+def coerce_boolean(val: str) -> bool:
+    return val == "true" or val == "1" or val == "on"
+
+
+def maybe_coerce_integer(val: str | None) -> int | None:
+    if val is None:
+        return None
+    return coerce_integer(val)
+
+
+def maybe_coerce_float(val: str | None) -> float | None:
+    if val is None:
+        return None
+    return coerce_float(val)
+
+
+def maybe_coerce_boolean(val: str | None) -> bool | None:
+    if val is None:
+        return None
+    return coerce_boolean(val)
+
+
+def removeprefix(string: str, prefix: str) -> str:
+    """Remove a prefix from a string.
+
+    Backport of `str.removeprefix` for Python < 3.9
+    """
+    if string.startswith(prefix):
+        return string[len(prefix) :]
+    return string
+
+
+def removesuffix(string: str, suffix: str) -> str:
+    """Remove a suffix from a string.
+
+    Backport of `str.removesuffix` for Python < 3.9
+    """
+    if string.endswith(suffix):
+        return string[: -len(suffix)]
+    return string
+
+
+def file_from_path(path: str) -> FileTypes:
+    contents = Path(path).read_bytes()
+    file_name = os.path.basename(path)
+    return (file_name, contents)
+
+
+def get_required_header(headers: HeadersLike, header: str) -> str:
+    lower_header = header.lower()
+    if is_mapping_t(headers):
+        # mypy doesn't understand the type narrowing here
+        for k, v in headers.items():  # type: ignore
+            if k.lower() == lower_header and isinstance(v, str):
+                return v
+
+    # to deal with the case where the header looks like Stainless-Event-Id
+    intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize())
+
+    for normalized_header in [header, lower_header, header.upper(), intercaps_header]:
+        value = headers.get(normalized_header)
+        if value:
+            return value
+
+    raise ValueError(f"Could not find {header} header")
+
+
+def get_async_library() -> str:
+    try:
+        return sniffio.current_async_library()
+    except Exception:
+        return "false"
+
+
+def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]:
+    """A version of functools.lru_cache that retains the type signature
+    for the wrapped function arguments.
+    """
+    wrapper = functools.lru_cache(  # noqa: TID251
+        maxsize=maxsize,
+    )
+    return cast(Any, wrapper)  # type: ignore[no-any-return]
+
+
+def json_safe(data: object) -> object:
+    """Translates a mapping / sequence recursively in the same fashion
+    as `pydantic` v2's `model_dump(mode="json")`.
+    """
+    if is_mapping(data):
+        return {json_safe(key): json_safe(value) for key, value in data.items()}
+
+    if is_iterable(data) and not isinstance(data, (str, bytes, bytearray)):
+        return [json_safe(item) for item in data]
+
+    if isinstance(data, (datetime, date)):
+        return data.isoformat()
+
+    return data
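+
+
+# Editor's note: illustrative only (not upstream code):
+#
+#     json_safe({"when": date(2024, 1, 1), "tags": ("a", "b")})
+#     # -> {'when': '2024-01-01', 'tags': ['a', 'b']}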
+
+
+def is_azure_client(client: object) -> TypeGuard[AzureOpenAI]:
+    from ..lib.azure import AzureOpenAI
+
+    return isinstance(client, AzureOpenAI)
+
+
+def is_async_azure_client(client: object) -> TypeGuard[AsyncAzureOpenAI]:
+    from ..lib.azure import AsyncAzureOpenAI
+
+    return isinstance(client, AsyncAzureOpenAI)
diff --git a/.venv/lib/python3.12/site-packages/openai/_version.py b/.venv/lib/python3.12/site-packages/openai/_version.py
new file mode 100644
index 00000000..a29ce4e8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/_version.py
@@ -0,0 +1,4 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+__title__ = "openai"
+__version__ = "1.68.2"  # x-release-please-version
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/__init__.py
new file mode 100644
index 00000000..d453d5e1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/__init__.py
@@ -0,0 +1 @@
+from ._cli import main as main
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py
new file mode 100644
index 00000000..56a0260a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/__init__.py
@@ -0,0 +1 @@
+from ._main import register_commands as register_commands
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/_main.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/_main.py
new file mode 100644
index 00000000..fe5a5e6f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/_main.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+from argparse import ArgumentParser
+
+from . import chat, audio, files, image, models, completions
+
+
+def register_commands(parser: ArgumentParser) -> None:
+    subparsers = parser.add_subparsers(help="All API subcommands")
+
+    chat.register(subparsers)
+    image.register(subparsers)
+    audio.register(subparsers)
+    files.register(subparsers)
+    models.register(subparsers)
+    completions.register(subparsers)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/audio.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/audio.py
new file mode 100644
index 00000000..269c67df
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/audio.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Any, Optional, cast
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from ..._types import NOT_GIVEN
+from .._models import BaseModel
+from .._progress import BufferReader
+from ...types.audio import Transcription
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    # transcriptions
+    sub = subparser.add_parser("audio.transcriptions.create")
+
+    # Required
+    sub.add_argument("-m", "--model", type=str, default="whisper-1")
+    sub.add_argument("-f", "--file", type=str, required=True)
+    # Optional
+    sub.add_argument("--response-format", type=str)
+    sub.add_argument("--language", type=str)
+    sub.add_argument("-t", "--temperature", type=float)
+    sub.add_argument("--prompt", type=str)
+    sub.set_defaults(func=CLIAudio.transcribe, args_model=CLITranscribeArgs)
+
+    # translations
+    sub = subparser.add_parser("audio.translations.create")
+
+    # Required
+    sub.add_argument("-f", "--file", type=str, required=True)
+    # Optional
+    sub.add_argument("-m", "--model", type=str, default="whisper-1")
+    sub.add_argument("--response-format", type=str)
+    # TODO: doesn't seem to be supported by the API
+    # sub.add_argument("--language", type=str)
+    sub.add_argument("-t", "--temperature", type=float)
+    sub.add_argument("--prompt", type=str)
+    sub.set_defaults(func=CLIAudio.translate, args_model=CLITranslationArgs)
+
+
+class CLITranscribeArgs(BaseModel):
+    model: str
+    file: str
+    response_format: Optional[str] = None
+    language: Optional[str] = None
+    temperature: Optional[float] = None
+    prompt: Optional[str] = None
+
+
+class CLITranslationArgs(BaseModel):
+    model: str
+    file: str
+    response_format: Optional[str] = None
+    language: Optional[str] = None
+    temperature: Optional[float] = None
+    prompt: Optional[str] = None
+
+
+class CLIAudio:
+    @staticmethod
+    def transcribe(args: CLITranscribeArgs) -> None:
+        with open(args.file, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        model = cast(
+            "Transcription | str",
+            get_client().audio.transcriptions.create(
+                file=(args.file, buffer_reader),
+                model=args.model,
+                language=args.language or NOT_GIVEN,
+                temperature=args.temperature or NOT_GIVEN,
+                prompt=args.prompt or NOT_GIVEN,
+                # casts required because the API is typed for enums
+                # but we don't want to validate that here for forwards-compat
+                response_format=cast(Any, args.response_format),
+            ),
+        )
+        if isinstance(model, str):
+            sys.stdout.write(model + "\n")
+        else:
+            print_model(model)
+
+    @staticmethod
+    def translate(args: CLITranslationArgs) -> None:
+        with open(args.file, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        model = cast(
+            "Transcription | str",
+            get_client().audio.translations.create(
+                file=(args.file, buffer_reader),
+                model=args.model,
+                temperature=args.temperature or NOT_GIVEN,
+                prompt=args.prompt or NOT_GIVEN,
+                # casts required because the API is typed for enums
+                # but we don't want to validate that here for forwards-compat
+                response_format=cast(Any, args.response_format),
+            ),
+        )
+        if isinstance(model, str):
+            sys.stdout.write(model + "\n")
+        else:
+            print_model(model)
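+
+
+# Editor's note: a hypothetical invocation sketch (not upstream code), assuming the
+# top-level CLI exposes these subparsers under `openai api`:
+#
+#     openai api audio.transcriptions.create -m whisper-1 -f recording.mp3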
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py
new file mode 100644
index 00000000..87d97163
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/__init__.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from . import completions
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    completions.register(subparser)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py
new file mode 100644
index 00000000..344eeff3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/chat/completions.py
@@ -0,0 +1,160 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, List, Optional, cast
+from argparse import ArgumentParser
+from typing_extensions import Literal, NamedTuple
+
+from ..._utils import get_client
+from ..._models import BaseModel
+from ...._streaming import Stream
+from ....types.chat import (
+    ChatCompletionRole,
+    ChatCompletionChunk,
+    CompletionCreateParams,
+)
+from ....types.chat.completion_create_params import (
+    CompletionCreateParamsStreaming,
+    CompletionCreateParamsNonStreaming,
+)
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("chat.completions.create")
+
+    sub._action_groups.pop()
+    req = sub.add_argument_group("required arguments")
+    opt = sub.add_argument_group("optional arguments")
+
+    req.add_argument(
+        "-g",
+        "--message",
+        action="append",
+        nargs=2,
+        metavar=("ROLE", "CONTENT"),
+        help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.",
+        required=True,
+    )
+    req.add_argument(
+        "-m",
+        "--model",
+        help="The model to use.",
+        required=True,
+    )
+
+    opt.add_argument(
+        "-n",
+        "--n",
+        help="How many completions to generate for the conversation.",
+        type=int,
+    )
+    opt.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int)
+    opt.add_argument(
+        "-t",
+        "--temperature",
+        help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+
+Mutually exclusive with `top_p`.""",
+        type=float,
+    )
+    opt.add_argument(
+        "-P",
+        "--top_p",
+        help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
+
+            Mutually exclusive with `temperature`.""",
+        type=float,
+    )
+    opt.add_argument(
+        "--stop",
+        help="A stop sequence at which to stop generating tokens for the message.",
+    )
+    opt.add_argument("--stream", help="Stream messages as they're ready.", action="store_true")
+    sub.set_defaults(func=CLIChatCompletion.create, args_model=CLIChatCompletionCreateArgs)
+
+
+class CLIMessage(NamedTuple):
+    role: ChatCompletionRole
+    content: str
+
+
+class CLIChatCompletionCreateArgs(BaseModel):
+    message: List[CLIMessage]
+    model: str
+    n: Optional[int] = None
+    max_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    stop: Optional[str] = None
+    stream: bool = False
+
+
+class CLIChatCompletion:
+    @staticmethod
+    def create(args: CLIChatCompletionCreateArgs) -> None:
+        params: CompletionCreateParams = {
+            "model": args.model,
+            "messages": [
+                {"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message
+            ],
+            # type checkers are not good at inferring union types so we have to set stream afterwards
+            "stream": False,
+        }
+        if args.temperature is not None:
+            params["temperature"] = args.temperature
+        if args.stop is not None:
+            params["stop"] = args.stop
+        if args.top_p is not None:
+            params["top_p"] = args.top_p
+        if args.n is not None:
+            params["n"] = args.n
+        if args.stream:
+            params["stream"] = args.stream  # type: ignore
+        if args.max_tokens is not None:
+            params["max_tokens"] = args.max_tokens
+
+        if args.stream:
+            return CLIChatCompletion._stream_create(cast(CompletionCreateParamsStreaming, params))
+
+        return CLIChatCompletion._create(cast(CompletionCreateParamsNonStreaming, params))
+
+    @staticmethod
+    def _create(params: CompletionCreateParamsNonStreaming) -> None:
+        completion = get_client().chat.completions.create(**params)
+        should_print_header = len(completion.choices) > 1
+        for choice in completion.choices:
+            if should_print_header:
+                sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
+
+            content = choice.message.content if choice.message.content is not None else "None"
+            sys.stdout.write(content)
+
+            if should_print_header or not content.endswith("\n"):
+                sys.stdout.write("\n")
+
+            sys.stdout.flush()
+
+    @staticmethod
+    def _stream_create(params: CompletionCreateParamsStreaming) -> None:
+        # cast is required for mypy
+        stream = cast(  # pyright: ignore[reportUnnecessaryCast]
+            Stream[ChatCompletionChunk], get_client().chat.completions.create(**params)
+        )
+        for chunk in stream:
+            should_print_header = len(chunk.choices) > 1
+            for choice in chunk.choices:
+                if should_print_header:
+                    sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
+
+                content = choice.delta.content or ""
+                sys.stdout.write(content)
+
+                if should_print_header:
+                    sys.stdout.write("\n")
+
+                sys.stdout.flush()
+
+        sys.stdout.write("\n")
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/completions.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/completions.py
new file mode 100644
index 00000000..cbdb35bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/completions.py
@@ -0,0 +1,173 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Optional, cast
+from argparse import ArgumentParser
+from functools import partial
+
+from openai.types.completion import Completion
+
+from .._utils import get_client
+from ..._types import NOT_GIVEN, NotGivenOr
+from ..._utils import is_given
+from .._errors import CLIError
+from .._models import BaseModel
+from ..._streaming import Stream
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("completions.create")
+
+    # Required
+    sub.add_argument(
+        "-m",
+        "--model",
+        help="The model to use",
+        required=True,
+    )
+
+    # Optional
+    sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
+    sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true")
+    sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int)
+    sub.add_argument(
+        "-t",
+        "--temperature",
+        help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+
+Mutually exclusive with `top_p`.""",
+        type=float,
+    )
+    sub.add_argument(
+        "-P",
+        "--top_p",
+        help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
+
+            Mutually exclusive with `temperature`.""",
+        type=float,
+    )
+    sub.add_argument(
+        "-n",
+        "--n",
+        help="How many sub-completions to generate for each prompt.",
+        type=int,
+    )
+    sub.add_argument(
+        "--logprobs",
+        help="Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
+        type=int,
+    )
+    sub.add_argument(
+        "--best_of",
+        help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.",
+        type=int,
+    )
+    sub.add_argument(
+        "--echo",
+        help="Echo back the prompt in addition to the completion",
+        action="store_true",
+    )
+    sub.add_argument(
+        "--frequency_penalty",
+        help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+        type=float,
+    )
+    sub.add_argument(
+        "--presence_penalty",
+        help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+        type=float,
+    )
+    sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.")
+    sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.")
+    sub.add_argument(
+        "--user",
+        help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
+    )
+    # TODO: add support for logit_bias
+    sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs)
+
+
+class CLICompletionCreateArgs(BaseModel):
+    model: str
+    stream: bool = False
+
+    prompt: Optional[str] = None
+    n: NotGivenOr[int] = NOT_GIVEN
+    stop: NotGivenOr[str] = NOT_GIVEN
+    user: NotGivenOr[str] = NOT_GIVEN
+    echo: NotGivenOr[bool] = NOT_GIVEN
+    suffix: NotGivenOr[str] = NOT_GIVEN
+    best_of: NotGivenOr[int] = NOT_GIVEN
+    top_p: NotGivenOr[float] = NOT_GIVEN
+    logprobs: NotGivenOr[int] = NOT_GIVEN
+    max_tokens: NotGivenOr[int] = NOT_GIVEN
+    temperature: NotGivenOr[float] = NOT_GIVEN
+    presence_penalty: NotGivenOr[float] = NOT_GIVEN
+    frequency_penalty: NotGivenOr[float] = NOT_GIVEN
+
+
+class CLICompletions:
+    @staticmethod
+    def create(args: CLICompletionCreateArgs) -> None:
+        if is_given(args.n) and args.n > 1 and args.stream:
+            raise CLIError("Can't stream completions with n>1 with the current CLI")
+
+        make_request = partial(
+            get_client().completions.create,
+            n=args.n,
+            echo=args.echo,
+            stop=args.stop,
+            user=args.user,
+            model=args.model,
+            top_p=args.top_p,
+            prompt=args.prompt,
+            suffix=args.suffix,
+            best_of=args.best_of,
+            logprobs=args.logprobs,
+            max_tokens=args.max_tokens,
+            temperature=args.temperature,
+            presence_penalty=args.presence_penalty,
+            frequency_penalty=args.frequency_penalty,
+        )
+
+        if args.stream:
+            return CLICompletions._stream_create(
+                # mypy doesn't understand the `partial` function but pyright does
+                cast(Stream[Completion], make_request(stream=True))  # pyright: ignore[reportUnnecessaryCast]
+            )
+
+        return CLICompletions._create(make_request())
+
+    @staticmethod
+    def _create(completion: Completion) -> None:
+        should_print_header = len(completion.choices) > 1
+        for choice in completion.choices:
+            if should_print_header:
+                sys.stdout.write("===== Completion {} =====\n".format(choice.index))
+
+            sys.stdout.write(choice.text)
+
+            if should_print_header or not choice.text.endswith("\n"):
+                sys.stdout.write("\n")
+
+            sys.stdout.flush()
+
+    @staticmethod
+    def _stream_create(stream: Stream[Completion]) -> None:
+        for completion in stream:
+            should_print_header = len(completion.choices) > 1
+            for choice in sorted(completion.choices, key=lambda c: c.index):
+                if should_print_header:
+                    sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
+
+                sys.stdout.write(choice.text)
+
+                if should_print_header:
+                    sys.stdout.write("\n")
+
+                sys.stdout.flush()
+
+        sys.stdout.write("\n")
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/files.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/files.py
new file mode 100644
index 00000000..5f3631b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/files.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, cast
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from .._models import BaseModel
+from .._progress import BufferReader
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("files.create")
+
+    sub.add_argument(
+        "-f",
+        "--file",
+        required=True,
+        help="File to upload",
+    )
+    sub.add_argument(
+        "-p",
+        "--purpose",
+        help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)",
+        required=True,
+    )
+    sub.set_defaults(func=CLIFile.create, args_model=CLIFileCreateArgs)
+
+    sub = subparser.add_parser("files.retrieve")
+    sub.add_argument("-i", "--id", required=True, help="The files ID")
+    sub.set_defaults(func=CLIFile.get, args_model=CLIFileCreateArgs)
+
+    sub = subparser.add_parser("files.delete")
+    sub.add_argument("-i", "--id", required=True, help="The files ID")
+    sub.set_defaults(func=CLIFile.delete, args_model=CLIFileCreateArgs)
+
+    sub = subparser.add_parser("files.list")
+    sub.set_defaults(func=CLIFile.list)
+
+
+class CLIFileIDArgs(BaseModel):
+    id: str
+
+
+class CLIFileCreateArgs(BaseModel):
+    file: str
+    purpose: str
+
+
+class CLIFile:
+    @staticmethod
+    def create(args: CLIFileCreateArgs) -> None:
+        with open(args.file, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        file = get_client().files.create(
+            file=(args.file, buffer_reader),
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            purpose=cast(Any, args.purpose),
+        )
+        print_model(file)
+
+    @staticmethod
+    def get(args: CLIFileIDArgs) -> None:
+        file = get_client().files.retrieve(file_id=args.id)
+        print_model(file)
+
+    @staticmethod
+    def delete(args: CLIFileIDArgs) -> None:
+        file = get_client().files.delete(file_id=args.id)
+        print_model(file)
+
+    @staticmethod
+    def list() -> None:
+        files = get_client().files.list()
+        for file in files:
+            print_model(file)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/image.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/image.py
new file mode 100644
index 00000000..3e2a0a90
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/image.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, cast
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from ..._types import NOT_GIVEN, NotGiven, NotGivenOr
+from .._models import BaseModel
+from .._progress import BufferReader
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("images.generate")
+    sub.add_argument("-m", "--model", type=str)
+    sub.add_argument("-p", "--prompt", type=str, required=True)
+    sub.add_argument("-n", "--num-images", type=int, default=1)
+    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
+    sub.add_argument("--response-format", type=str, default="url")
+    sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs)
+
+    sub = subparser.add_parser("images.edit")
+    sub.add_argument("-m", "--model", type=str)
+    sub.add_argument("-p", "--prompt", type=str, required=True)
+    sub.add_argument("-n", "--num-images", type=int, default=1)
+    sub.add_argument(
+        "-I",
+        "--image",
+        type=str,
+        required=True,
+        help="Image to modify. Should be a local path and a PNG encoded image.",
+    )
+    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
+    sub.add_argument("--response-format", type=str, default="url")
+    sub.add_argument(
+        "-M",
+        "--mask",
+        type=str,
+        required=False,
+        help="Path to a mask image. It should be the same size as the image you're editing and a RGBA PNG image. The Alpha channel acts as the mask.",
+    )
+    sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs)
+
+    sub = subparser.add_parser("images.create_variation")
+    sub.add_argument("-m", "--model", type=str)
+    sub.add_argument("-n", "--num-images", type=int, default=1)
+    sub.add_argument(
+        "-I",
+        "--image",
+        type=str,
+        required=True,
+        help="Image to modify. Should be a local path and a PNG encoded image.",
+    )
+    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
+    sub.add_argument("--response-format", type=str, default="url")
+    sub.set_defaults(func=CLIImage.create_variation, args_model=CLIImageCreateVariationArgs)
+
+
+class CLIImageCreateArgs(BaseModel):
+    prompt: str
+    num_images: int
+    size: str
+    response_format: str
+    model: NotGivenOr[str] = NOT_GIVEN
+
+
+class CLIImageCreateVariationArgs(BaseModel):
+    image: str
+    num_images: int
+    size: str
+    response_format: str
+    model: NotGivenOr[str] = NOT_GIVEN
+
+
+class CLIImageEditArgs(BaseModel):
+    image: str
+    num_images: int
+    size: str
+    response_format: str
+    prompt: str
+    mask: NotGivenOr[str] = NOT_GIVEN
+    model: NotGivenOr[str] = NOT_GIVEN
+
+
+class CLIImage:
+    @staticmethod
+    def create(args: CLIImageCreateArgs) -> None:
+        image = get_client().images.generate(
+            model=args.model,
+            prompt=args.prompt,
+            n=args.num_images,
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            size=cast(Any, args.size),
+            response_format=cast(Any, args.response_format),
+        )
+        print_model(image)
+
+    @staticmethod
+    def create_variation(args: CLIImageCreateVariationArgs) -> None:
+        with open(args.image, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
+
+        image = get_client().images.create_variation(
+            model=args.model,
+            image=("image", buffer_reader),
+            n=args.num_images,
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            size=cast(Any, args.size),
+            response_format=cast(Any, args.response_format),
+        )
+        print_model(image)
+
+    @staticmethod
+    def edit(args: CLIImageEditArgs) -> None:
+        with open(args.image, "rb") as file_reader:
+            buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress")
+
+        if isinstance(args.mask, NotGiven):
+            mask: NotGivenOr[BufferReader] = NOT_GIVEN
+        else:
+            with open(args.mask, "rb") as file_reader:
+                mask = BufferReader(file_reader.read(), desc="Mask progress")
+
+        image = get_client().images.edit(
+            model=args.model,
+            prompt=args.prompt,
+            image=("image", buffer_reader),
+            n=args.num_images,
+            mask=("mask", mask) if not isinstance(mask, NotGiven) else mask,
+            # casts required because the API is typed for enums
+            # but we don't want to validate that here for forwards-compat
+            size=cast(Any, args.size),
+            response_format=cast(Any, args.response_format),
+        )
+        print_model(image)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_api/models.py b/.venv/lib/python3.12/site-packages/openai/cli/_api/models.py
new file mode 100644
index 00000000..017218fa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_api/models.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from .._utils import get_client, print_model
+from .._models import BaseModel
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("models.list")
+    sub.set_defaults(func=CLIModels.list)
+
+    sub = subparser.add_parser("models.retrieve")
+    sub.add_argument("-i", "--id", required=True, help="The model ID")
+    sub.set_defaults(func=CLIModels.get, args_model=CLIModelIDArgs)
+
+    sub = subparser.add_parser("models.delete")
+    sub.add_argument("-i", "--id", required=True, help="The model ID")
+    sub.set_defaults(func=CLIModels.delete, args_model=CLIModelIDArgs)
+
+
+class CLIModelIDArgs(BaseModel):
+    id: str
+
+
+class CLIModels:
+    @staticmethod
+    def get(args: CLIModelIDArgs) -> None:
+        model = get_client().models.retrieve(model=args.id)
+        print_model(model)
+
+    @staticmethod
+    def delete(args: CLIModelIDArgs) -> None:
+        model = get_client().models.delete(model=args.id)
+        print_model(model)
+
+    @staticmethod
+    def list() -> None:
+        models = get_client().models.list()
+        for model in models:
+            print_model(model)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_cli.py b/.venv/lib/python3.12/site-packages/openai/cli/_cli.py
new file mode 100644
index 00000000..fd165f48
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_cli.py
@@ -0,0 +1,233 @@
+from __future__ import annotations
+
+import sys
+import logging
+import argparse
+from typing import Any, List, Type, Optional
+from typing_extensions import ClassVar
+
+import httpx
+import pydantic
+
+import openai
+
+from . import _tools
+from .. import _ApiType, __version__
+from ._api import register_commands
+from ._utils import can_use_http2
+from ._errors import CLIError, display_error
+from .._compat import PYDANTIC_V2, ConfigDict, model_parse
+from .._models import BaseModel
+from .._exceptions import APIError
+
+logger = logging.getLogger()
+formatter = logging.Formatter("[%(asctime)s] %(message)s")
+handler = logging.StreamHandler(sys.stderr)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+
+class Arguments(BaseModel):
+    if PYDANTIC_V2:
+        model_config: ClassVar[ConfigDict] = ConfigDict(
+            extra="ignore",
+        )
+    else:
+
+        class Config(pydantic.BaseConfig):  # type: ignore
+            extra: Any = pydantic.Extra.ignore  # type: ignore
+
+    verbosity: int
+    version: Optional[str] = None
+
+    api_key: Optional[str]
+    api_base: Optional[str]
+    organization: Optional[str]
+    proxy: Optional[List[str]]
+    api_type: Optional[_ApiType] = None
+    api_version: Optional[str] = None
+
+    # azure
+    azure_endpoint: Optional[str] = None
+    azure_ad_token: Optional[str] = None
+
+    # internal, set by subparsers to parse their specific args
+    args_model: Optional[Type[BaseModel]] = None
+
+    # internal, used so that subparsers can forward unknown arguments
+    unknown_args: List[str] = []
+    allow_unknown_args: bool = False
+
+
+def _build_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(description=None, prog="openai")
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="count",
+        dest="verbosity",
+        default=0,
+        help="Set verbosity.",
+    )
+    parser.add_argument("-b", "--api-base", help="What API base url to use.")
+    parser.add_argument("-k", "--api-key", help="What API key to use.")
+    parser.add_argument("-p", "--proxy", nargs="+", help="What proxy to use.")
+    parser.add_argument(
+        "-o",
+        "--organization",
+        help="Which organization to run as (will use your default organization if not specified)",
+    )
+    parser.add_argument(
+        "-t",
+        "--api-type",
+        type=str,
+        choices=("openai", "azure"),
+        help="The backend API to call, must be `openai` or `azure`",
+    )
+    parser.add_argument(
+        "--api-version",
+        help="The Azure API version, e.g. 'https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning'",
+    )
+
+    # azure
+    parser.add_argument(
+        "--azure-endpoint",
+        help="The Azure endpoint, e.g. 'https://endpoint.openai.azure.com'",
+    )
+    parser.add_argument(
+        "--azure-ad-token",
+        help="A token from Azure Active Directory, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id",
+    )
+
+    # prints the package version
+    parser.add_argument(
+        "-V",
+        "--version",
+        action="version",
+        version="%(prog)s " + __version__,
+    )
+
+    def help() -> None:
+        parser.print_help()
+
+    parser.set_defaults(func=help)
+
+    subparsers = parser.add_subparsers()
+    sub_api = subparsers.add_parser("api", help="Direct API calls")
+
+    register_commands(sub_api)
+
+    sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
+    _tools.register_commands(sub_tools, subparsers)
+
+    return parser
+
+
+def main() -> int:
+    try:
+        _main()
+    except (APIError, CLIError, pydantic.ValidationError) as err:
+        display_error(err)
+        return 1
+    except KeyboardInterrupt:
+        sys.stderr.write("\n")
+        return 1
+    return 0
+
+
+def _parse_args(parser: argparse.ArgumentParser) -> tuple[argparse.Namespace, Arguments, list[str]]:
+    # argparse by default will strip out the `--` but we want to keep it for unknown arguments
+    if "--" in sys.argv:
+        idx = sys.argv.index("--")
+        known_args = sys.argv[1:idx]
+        unknown_args = sys.argv[idx:]
+    else:
+        known_args = sys.argv[1:]
+        unknown_args = []
+
+    parsed, remaining_unknown = parser.parse_known_args(known_args)
+
+    # append any remaining unknown arguments from the initial parsing
+    remaining_unknown.extend(unknown_args)
+
+    args = model_parse(Arguments, vars(parsed))
+    if not args.allow_unknown_args:
+        # we have to parse twice to ensure any unknown arguments
+        # result in an error if that behaviour is desired
+        parser.parse_args()
+
+    return parsed, args, remaining_unknown
+
+
+def _main() -> None:
+    parser = _build_parser()
+    parsed, args, unknown = _parse_args(parser)
+
+    if args.verbosity != 0:
+        sys.stderr.write("Warning: --verbosity isn't supported yet\n")
+
+    proxies: dict[str, httpx.BaseTransport] = {}
+    if args.proxy is not None:
+        for proxy in args.proxy:
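+            # mounts are keyed by URL scheme ("http://" or "https://"); at most one proxy per scheme is allowed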
+            key = "https://" if proxy.startswith("https") else "http://"
+            if key in proxies:
+                raise CLIError(f"Multiple {key} proxies given - only the last one would be used")
+
+            proxies[key] = httpx.HTTPTransport(proxy=httpx.Proxy(httpx.URL(proxy)))
+
+    http_client = httpx.Client(
+        mounts=proxies or None,
+        http2=can_use_http2(),
+    )
+    openai.http_client = http_client
+
+    if args.organization:
+        openai.organization = args.organization
+
+    if args.api_key:
+        openai.api_key = args.api_key
+
+    if args.api_base:
+        openai.base_url = args.api_base
+
+    # azure
+    if args.api_type is not None:
+        openai.api_type = args.api_type
+
+    if args.azure_endpoint is not None:
+        openai.azure_endpoint = args.azure_endpoint
+
+    if args.api_version is not None:
+        openai.api_version = args.api_version
+
+    if args.azure_ad_token is not None:
+        openai.azure_ad_token = args.azure_ad_token
+
+    try:
+        if args.args_model:
+            parsed.func(
+                model_parse(
+                    args.args_model,
+                    {
+                        **{
+                            # we omit None values so that they can be defaulted to `NotGiven`
+                            # and we'll strip it from the API request
+                            key: value
+                            for key, value in vars(parsed).items()
+                            if value is not None
+                        },
+                        "unknown_args": unknown,
+                    },
+                )
+            )
+        else:
+            parsed.func()
+    finally:
+        try:
+            http_client.close()
+        except Exception:
+            pass
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_errors.py b/.venv/lib/python3.12/site-packages/openai/cli/_errors.py
new file mode 100644
index 00000000..7d0292da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_errors.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+import sys
+
+import pydantic
+
+from ._utils import Colors, organization_info
+from .._exceptions import APIError, OpenAIError
+
+
+class CLIError(OpenAIError): ...
+
+
+class SilentCLIError(CLIError): ...
+
+
+def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
+    if isinstance(err, SilentCLIError):
+        return
+
+    sys.stderr.write("{}{}Error:{} {}\n".format(organization_info(), Colors.FAIL, Colors.ENDC, err))
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_models.py b/.venv/lib/python3.12/site-packages/openai/cli/_models.py
new file mode 100644
index 00000000..5583db26
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_models.py
@@ -0,0 +1,17 @@
+from typing import Any
+from typing_extensions import ClassVar
+
+import pydantic
+
+from .. import _models
+from .._compat import PYDANTIC_V2, ConfigDict
+
+
+class BaseModel(_models.BaseModel):
+    if PYDANTIC_V2:
+        model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True)
+    else:
+
+        class Config(pydantic.BaseConfig):  # type: ignore
+            extra: Any = pydantic.Extra.ignore  # type: ignore
+            arbitrary_types_allowed: bool = True
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_progress.py b/.venv/lib/python3.12/site-packages/openai/cli/_progress.py
new file mode 100644
index 00000000..8a7f2525
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_progress.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import io
+from typing import Callable
+from typing_extensions import override
+
+
+class CancelledError(Exception):
+    def __init__(self, msg: str) -> None:
+        self.msg = msg
+        super().__init__(msg)
+
+    @override
+    def __str__(self) -> str:
+        return self.msg
+
+    __repr__ = __str__
+
+
+class BufferReader(io.BytesIO):
+    def __init__(self, buf: bytes = b"", desc: str | None = None) -> None:
+        super().__init__(buf)
+        self._len = len(buf)
+        self._progress = 0
+        self._callback = progress(len(buf), desc=desc)
+
+    def __len__(self) -> int:
+        return self._len
+
+    @override
+    def read(self, n: int | None = -1) -> bytes:
+        chunk = io.BytesIO.read(self, n)
+        self._progress += len(chunk)
+
+        try:
+            self._callback(self._progress)
+        except Exception as e:  # catches exception from the callback
+            raise CancelledError("The upload was cancelled: {}".format(e)) from e
+
+        return chunk
+
+
+def progress(total: float, desc: str | None) -> Callable[[float], None]:
+    import tqdm
+
+    meter = tqdm.tqdm(total=total, unit_scale=True, desc=desc)
+
+    def incr(progress: float) -> None:
+        meter.n = progress
+        if progress == total:
+            meter.close()
+        else:
+            meter.refresh()
+
+    return incr
+
+
+def MB(i: int) -> int:
+    return int(i // 1024**2)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py
new file mode 100644
index 00000000..56a0260a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/__init__.py
@@ -0,0 +1 @@
+from ._main import register_commands as register_commands
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py
new file mode 100644
index 00000000..bd6cda40
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/_main.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from . import migrate, fine_tunes
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register_commands(parser: ArgumentParser, subparser: _SubParsersAction[ArgumentParser]) -> None:
+    migrate.register(subparser)
+
+    namespaced = parser.add_subparsers(title="Tools", help="Convenience client side tools")
+
+    fine_tunes.register(namespaced)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py
new file mode 100644
index 00000000..2128b889
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/fine_tunes.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING
+from argparse import ArgumentParser
+
+from .._models import BaseModel
+from ...lib._validators import (
+    get_validators,
+    write_out_file,
+    read_any_format,
+    apply_validators,
+    apply_necessary_remediation,
+)
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("fine_tunes.prepare_data")
+    sub.add_argument(
+        "-f",
+        "--file",
+        required=True,
+        help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing prompt-completion examples to be analyzed."
+        "This should be the local file path.",
+    )
+    sub.add_argument(
+        "-q",
+        "--quiet",
+        required=False,
+        action="store_true",
+        help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
+    )
+    sub.set_defaults(func=prepare_data, args_model=PrepareDataArgs)
+
+
+class PrepareDataArgs(BaseModel):
+    file: str
+
+    quiet: bool
+
+
+def prepare_data(args: PrepareDataArgs) -> None:
+    sys.stdout.write("Analyzing...\n")
+    fname = args.file
+    auto_accept = args.quiet
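+    # read the examples file (any supported format) and apply any remediation needed before validation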
+    df, remediation = read_any_format(fname)
+    apply_necessary_remediation(None, remediation)
+
+    validators = get_validators()
+
+    assert df is not None
+
+    apply_validators(
+        df,
+        fname,
+        remediation,
+        validators,
+        auto_accept,
+        write_out_file_func=write_out_file,
+    )
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py b/.venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py
new file mode 100644
index 00000000..841b7775
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_tools/migrate.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+import os
+import sys
+import shutil
+import tarfile
+import platform
+import subprocess
+from typing import TYPE_CHECKING, List
+from pathlib import Path
+from argparse import ArgumentParser
+
+import httpx
+
+from .._errors import CLIError, SilentCLIError
+from .._models import BaseModel
+
+if TYPE_CHECKING:
+    from argparse import _SubParsersAction
+
+
+def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
+    sub = subparser.add_parser("migrate")
+    sub.set_defaults(func=migrate, args_model=MigrateArgs, allow_unknown_args=True)
+
+    sub = subparser.add_parser("grit")
+    sub.set_defaults(func=grit, args_model=GritArgs, allow_unknown_args=True)
+
+
+class GritArgs(BaseModel):
+    # internal
+    unknown_args: List[str] = []
+
+
+def grit(args: GritArgs) -> None:
+    grit_path = install()
+
+    try:
+        subprocess.check_call([grit_path, *args.unknown_args])
+    except subprocess.CalledProcessError:
+        # stdout and stderr are forwarded by subprocess so an error will already
+        # have been displayed
+        raise SilentCLIError() from None
+
+
+class MigrateArgs(BaseModel):
+    # internal
+    unknown_args: List[str] = []
+
+
+def migrate(args: MigrateArgs) -> None:
+    grit_path = install()
+
+    try:
+        subprocess.check_call([grit_path, "apply", "openai", *args.unknown_args])
+    except subprocess.CalledProcessError:
+        # stdout and stderr are forwarded by subprocess so an error will already
+        # have been displayed
+        raise SilentCLIError() from None
+
+
+# handles downloading the Grit CLI until they provide their own PyPI package
+
+KEYGEN_ACCOUNT = "custodian-dev"
+
+
+def _cache_dir() -> Path:
+    xdg = os.environ.get("XDG_CACHE_HOME")
+    if xdg is not None:
+        return Path(xdg)
+
+    return Path.home() / ".cache"
+
+
+def _debug(message: str) -> None:
+    if not os.environ.get("DEBUG"):
+        return
+
+    sys.stdout.write(f"[DEBUG]: {message}\n")
+
+
+def install() -> Path:
+    """Installs the Grit CLI and returns the location of the binary"""
+    if sys.platform == "win32":
+        raise CLIError("Windows is not supported yet in the migration CLI")
+
+    _debug("Using Grit installer from GitHub")
+
+    platform = "apple-darwin" if sys.platform == "darwin" else "unknown-linux-gnu"
+
+    dir_name = _cache_dir() / "openai-python"
+    install_dir = dir_name / ".install"
+    target_dir = install_dir / "bin"
+
+    target_path = target_dir / "grit"
+    temp_file = target_dir / "grit.tmp"
+
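+    # reuse a previously downloaded binary if one is already cached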
+    if target_path.exists():
+        _debug(f"{target_path} already exists")
+        sys.stdout.flush()
+        return target_path
+
+    _debug(f"Using Grit CLI path: {target_path}")
+
+    target_dir.mkdir(parents=True, exist_ok=True)
+
+    if temp_file.exists():
+        temp_file.unlink()
+
+    arch = _get_arch()
+    _debug(f"Using architecture {arch}")
+
+    file_name = f"grit-{arch}-{platform}"
+    download_url = f"https://github.com/getgrit/gritql/releases/latest/download/{file_name}.tar.gz"
+
+    sys.stdout.write(f"Downloading Grit CLI from {download_url}\n")
+    with httpx.Client() as client:
+        download_response = client.get(download_url, follow_redirects=True)
+        if download_response.status_code != 200:
+            raise CLIError(f"Failed to download Grit CLI from {download_url}")
+        with open(temp_file, "wb") as file:
+            for chunk in download_response.iter_bytes():
+                file.write(chunk)
+
+    unpacked_dir = target_dir / "cli-bin"
+    unpacked_dir.mkdir(parents=True, exist_ok=True)
+
+    with tarfile.open(temp_file, "r:gz") as archive:
+        if sys.version_info >= (3, 12):
+            archive.extractall(unpacked_dir, filter="data")
+        else:
+            archive.extractall(unpacked_dir)
+
+    _move_files_recursively(unpacked_dir, target_dir)
+
+    shutil.rmtree(unpacked_dir)
+    os.remove(temp_file)
+    os.chmod(target_path, 0o755)
+
+    sys.stdout.flush()
+
+    return target_path
+
+
+def _move_files_recursively(source_dir: Path, target_dir: Path) -> None:
+    for item in source_dir.iterdir():
+        if item.is_file():
+            item.rename(target_dir / item.name)
+        elif item.is_dir():
+            _move_files_recursively(item, target_dir)
+
+
+def _get_arch() -> str:
+    architecture = platform.machine().lower()
+
+    # Map the architecture names to Grit equivalents
+    arch_map = {
+        "x86_64": "x86_64",
+        "amd64": "x86_64",
+        "armv7l": "aarch64",
+        "arm64": "aarch64",
+    }
+
+    return arch_map.get(architecture, architecture)
diff --git a/.venv/lib/python3.12/site-packages/openai/cli/_utils.py b/.venv/lib/python3.12/site-packages/openai/cli/_utils.py
new file mode 100644
index 00000000..673eed61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/cli/_utils.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import sys
+
+import openai
+
+from .. import OpenAI, _load_client
+from .._compat import model_json
+from .._models import BaseModel
+
+
+class Colors:
+    HEADER = "\033[95m"
+    OKBLUE = "\033[94m"
+    OKGREEN = "\033[92m"
+    WARNING = "\033[93m"
+    FAIL = "\033[91m"
+    ENDC = "\033[0m"
+    BOLD = "\033[1m"
+    UNDERLINE = "\033[4m"
+
+
+def get_client() -> OpenAI:
+    return _load_client()
+
+
+def organization_info() -> str:
+    organization = openai.organization
+    if organization is not None:
+        return "[organization={}] ".format(organization)
+
+    return ""
+
+
+def print_model(model: BaseModel) -> None:
+    sys.stdout.write(model_json(model, indent=2) + "\n")
+
+
+def can_use_http2() -> bool:
+    try:
+        import h2  # type: ignore  # noqa
+    except ImportError:
+        return False
+
+    return True
diff --git a/.venv/lib/python3.12/site-packages/openai/helpers/__init__.py b/.venv/lib/python3.12/site-packages/openai/helpers/__init__.py
new file mode 100644
index 00000000..ab3044da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/helpers/__init__.py
@@ -0,0 +1,4 @@
+from .microphone import Microphone
+from .local_audio_player import LocalAudioPlayer
+
+__all__ = ["Microphone", "LocalAudioPlayer"]
diff --git a/.venv/lib/python3.12/site-packages/openai/helpers/local_audio_player.py b/.venv/lib/python3.12/site-packages/openai/helpers/local_audio_player.py
new file mode 100644
index 00000000..eed68aa2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/helpers/local_audio_player.py
@@ -0,0 +1,165 @@
+# mypy: ignore-errors
+from __future__ import annotations
+
+import queue
+import asyncio
+from typing import Any, Union, Callable, AsyncGenerator, cast
+from typing_extensions import TYPE_CHECKING
+
+from .. import _legacy_response
+from .._extras import numpy as np, sounddevice as sd
+from .._response import StreamedBinaryAPIResponse, AsyncStreamedBinaryAPIResponse
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+
+SAMPLE_RATE = 24000
+
+
+class LocalAudioPlayer:
+    def __init__(
+        self,
+        should_stop: Union[Callable[[], bool], None] = None,
+    ):
+        self.channels = 1
+        self.dtype = np.float32
+        self.should_stop = should_stop
+
+    async def _tts_response_to_buffer(
+        self,
+        response: Union[
+            _legacy_response.HttpxBinaryResponseContent,
+            AsyncStreamedBinaryAPIResponse,
+            StreamedBinaryAPIResponse,
+        ],
+    ) -> npt.NDArray[np.float32]:
+        chunks: list[bytes] = []
+        if isinstance(response, (_legacy_response.HttpxBinaryResponseContent, StreamedBinaryAPIResponse)):
+            for chunk in response.iter_bytes(chunk_size=1024):
+                if chunk:
+                    chunks.append(chunk)
+        else:
+            async for chunk in response.iter_bytes(chunk_size=1024):
+                if chunk:
+                    chunks.append(chunk)
+
+        audio_bytes = b"".join(chunks)
+        audio_np = np.frombuffer(audio_bytes, dtype=np.int16).astype(np.float32) / 32767.0
+        audio_np = audio_np.reshape(-1, 1)
+        return audio_np
+
+    async def play(
+        self,
+        input: Union[
+            npt.NDArray[np.int16],
+            npt.NDArray[np.float32],
+            _legacy_response.HttpxBinaryResponseContent,
+            AsyncStreamedBinaryAPIResponse,
+            StreamedBinaryAPIResponse,
+        ],
+    ) -> None:
+        audio_content: npt.NDArray[np.float32]
+        if isinstance(input, np.ndarray):
+            if input.dtype == np.int16 and self.dtype == np.float32:
+                audio_content = (input.astype(np.float32) / 32767.0).reshape(-1, self.channels)
+            elif input.dtype == np.float32:
+                audio_content = cast('npt.NDArray[np.float32]', input)
+            else:
+                raise ValueError(f"Unsupported dtype: {input.dtype}")
+        else:
+            audio_content = await self._tts_response_to_buffer(input)
+
+        loop = asyncio.get_event_loop()
+        event = asyncio.Event()
+        idx = 0
+
+        def callback(
+            outdata: npt.NDArray[np.float32],
+            frame_count: int,
+            _time_info: Any,
+            _status: Any,
+        ):
+            nonlocal idx
+
+            remainder = len(audio_content) - idx
+            if remainder == 0 or (callable(self.should_stop) and self.should_stop()):
+                loop.call_soon_threadsafe(event.set)
+                raise sd.CallbackStop
+            valid_frames = frame_count if remainder >= frame_count else remainder
+            outdata[:valid_frames] = audio_content[idx : idx + valid_frames]
+            outdata[valid_frames:] = 0
+            idx += valid_frames
+
+        stream = sd.OutputStream(
+            samplerate=SAMPLE_RATE,
+            callback=callback,
+            dtype=audio_content.dtype,
+            channels=audio_content.shape[1],
+        )
+        with stream:
+            await event.wait()
+
+    async def play_stream(
+        self,
+        buffer_stream: AsyncGenerator[Union[npt.NDArray[np.float32], npt.NDArray[np.int16], None], None],
+    ) -> None:
+        loop = asyncio.get_event_loop()
+        event = asyncio.Event()
+        buffer_queue: queue.Queue[Union[npt.NDArray[np.float32], npt.NDArray[np.int16], None]] = queue.Queue(maxsize=50)
+
+        async def buffer_producer():
+            async for buffer in buffer_stream:
+                if buffer is None:
+                    break
+                await loop.run_in_executor(None, buffer_queue.put, buffer)
+            await loop.run_in_executor(None, buffer_queue.put, None)  # Signal completion
+
+        def callback(
+            outdata: npt.NDArray[np.float32],
+            frame_count: int,
+            _time_info: Any,
+            _status: Any,
+        ):
+            nonlocal current_buffer, buffer_pos
+
+            frames_written = 0
+            while frames_written < frame_count:
+                if current_buffer is None or buffer_pos >= len(current_buffer):
+                    try:
+                        current_buffer = buffer_queue.get(timeout=0.1)
+                        if current_buffer is None:
+                            loop.call_soon_threadsafe(event.set)
+                            raise sd.CallbackStop
+                        buffer_pos = 0
+
+                        if current_buffer.dtype == np.int16 and self.dtype == np.float32:
+                            current_buffer = (current_buffer.astype(np.float32) / 32767.0).reshape(-1, self.channels)
+
+                    except queue.Empty:
+                        outdata[frames_written:] = 0
+                        return
+
+                remaining_frames = len(current_buffer) - buffer_pos
+                frames_to_write = min(frame_count - frames_written, remaining_frames)
+                outdata[frames_written : frames_written + frames_to_write] = current_buffer[
+                    buffer_pos : buffer_pos + frames_to_write
+                ]
+                buffer_pos += frames_to_write
+                frames_written += frames_to_write
+
+        current_buffer = None
+        buffer_pos = 0
+
+        producer_task = asyncio.create_task(buffer_producer())
+
+        with sd.OutputStream(
+            samplerate=SAMPLE_RATE,
+            channels=self.channels,
+            dtype=self.dtype,
+            callback=callback,
+        ):
+            await event.wait()
+
+        await producer_task
diff --git a/.venv/lib/python3.12/site-packages/openai/helpers/microphone.py b/.venv/lib/python3.12/site-packages/openai/helpers/microphone.py
new file mode 100644
index 00000000..62a6d8d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/helpers/microphone.py
@@ -0,0 +1,100 @@
+# mypy: ignore-errors
+from __future__ import annotations
+
+import io
+import time
+import wave
+import asyncio
+from typing import Any, Type, Union, Generic, TypeVar, Callable, overload
+from typing_extensions import TYPE_CHECKING, Literal
+
+from .._types import FileTypes, FileContent
+from .._extras import numpy as np, sounddevice as sd
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+
+SAMPLE_RATE = 24000
+
+DType = TypeVar("DType", bound=np.generic)
+
+
+class Microphone(Generic[DType]):
+    def __init__(
+        self,
+        channels: int = 1,
+        dtype: Type[DType] = np.int16,
+        should_record: Union[Callable[[], bool], None] = None,
+        timeout: Union[float, None] = None,
+    ):
+        self.channels = channels
+        self.dtype = dtype
+        self.should_record = should_record
+        self.buffer_chunks = []
+        self.timeout = timeout
+        self.has_record_function = callable(should_record)
+
+    def _ndarray_to_wav(self, audio_data: npt.NDArray[DType]) -> FileTypes:
+        buffer: FileContent = io.BytesIO()
+        with wave.open(buffer, "w") as wav_file:
+            wav_file.setnchannels(self.channels)
+            wav_file.setsampwidth(np.dtype(self.dtype).itemsize)
+            wav_file.setframerate(SAMPLE_RATE)
+            wav_file.writeframes(audio_data.tobytes())
+        buffer.seek(0)
+        return ("audio.wav", buffer, "audio/wav")
+
+    @overload
+    async def record(self, return_ndarray: Literal[True]) -> npt.NDArray[DType]: ...
+
+    @overload
+    async def record(self, return_ndarray: Literal[False]) -> FileTypes: ...
+
+    @overload
+    async def record(self, return_ndarray: None = ...) -> FileTypes: ...
+
+    async def record(self, return_ndarray: Union[bool, None] = False) -> Union[npt.NDArray[DType], FileTypes]:
+        loop = asyncio.get_event_loop()
+        event = asyncio.Event()
+        self.buffer_chunks: list[npt.NDArray[DType]] = []
+        start_time = time.perf_counter()
+
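+        # sounddevice invokes this from its audio thread; chunks are collected until the timeout elapses or should_record() returns False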
+        def callback(
+            indata: npt.NDArray[DType],
+            _frame_count: int,
+            _time_info: Any,
+            _status: Any,
+        ):
+            execution_time = time.perf_counter() - start_time
+            reached_recording_timeout = execution_time > self.timeout if self.timeout is not None else False
+            if reached_recording_timeout:
+                loop.call_soon_threadsafe(event.set)
+                raise sd.CallbackStop
+
+            should_be_recording = self.should_record() if callable(self.should_record) else True
+            if not should_be_recording:
+                loop.call_soon_threadsafe(event.set)
+                raise sd.CallbackStop
+
+            self.buffer_chunks.append(indata.copy())
+
+        stream = sd.InputStream(
+            callback=callback,
+            dtype=self.dtype,
+            samplerate=SAMPLE_RATE,
+            channels=self.channels,
+        )
+        with stream:
+            await event.wait()
+
+        # Concatenate all chunks into a single buffer, handle empty case
+        concatenated_chunks: npt.NDArray[DType] = (
+            np.concatenate(self.buffer_chunks, axis=0)
+            if len(self.buffer_chunks) > 0
+            else np.array([], dtype=self.dtype)
+        )
+
+        if return_ndarray:
+            return concatenated_chunks
+        else:
+            return self._ndarray_to_wav(concatenated_chunks)
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/.keep b/.venv/lib/python3.12/site-packages/openai/lib/.keep
new file mode 100644
index 00000000..5e2c99fd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/.keep
@@ -0,0 +1,4 @@
+File generated from our OpenAPI spec by Stainless.
+
+This directory can be used to store custom files to expand the SDK.
+It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/__init__.py b/.venv/lib/python3.12/site-packages/openai/lib/__init__.py
new file mode 100644
index 00000000..5c6cb782
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/__init__.py
@@ -0,0 +1,2 @@
+from ._tools import pydantic_function_tool as pydantic_function_tool
+from ._parsing import ResponseFormatT as ResponseFormatT
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/_old_api.py b/.venv/lib/python3.12/site-packages/openai/lib/_old_api.py
new file mode 100644
index 00000000..929c87e8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/_old_api.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+from typing_extensions import override
+
+from .._utils import LazyProxy
+from .._exceptions import OpenAIError
+
+INSTRUCTIONS = """
+
+You tried to access openai.{symbol}, but this is no longer supported in openai>=1.0.0 - see the README at https://github.com/openai/openai-python for the API.
+
+You can run `openai migrate` to automatically upgrade your codebase to use the 1.0.0 interface. 
+
+Alternatively, you can pin your installation to the old version, e.g. `pip install openai==0.28`
+
+A detailed migration guide is available here: https://github.com/openai/openai-python/discussions/742
+"""
+
+
+class APIRemovedInV1(OpenAIError):
+    def __init__(self, *, symbol: str) -> None:
+        super().__init__(INSTRUCTIONS.format(symbol=symbol))
+
+
+class APIRemovedInV1Proxy(LazyProxy[Any]):
+    def __init__(self, *, symbol: str) -> None:
+        super().__init__()
+        self._symbol = symbol
+
+    @override
+    def __load__(self) -> Any:
+        # return the proxy until it is eventually called so that
+        # we don't break people that are just checking the attributes
+        # of a module
+        return self
+
+    def __call__(self, *_args: Any, **_kwargs: Any) -> Any:
+        raise APIRemovedInV1(symbol=self._symbol)
+
+
+SYMBOLS = [
+    "Edit",
+    "File",
+    "Audio",
+    "Image",
+    "Model",
+    "Engine",
+    "Customer",
+    "FineTune",
+    "Embedding",
+    "Completion",
+    "Deployment",
+    "Moderation",
+    "ErrorObject",
+    "FineTuningJob",
+    "ChatCompletion",
+]
+
+# we explicitly tell type checkers that nothing is exported
+# from this file so that when we re-export the old symbols
+# in `openai/__init__.py` they aren't added to the auto-complete
+# suggestions given by editors
+if TYPE_CHECKING:
+    __all__: list[str] = []
+else:
+    __all__ = SYMBOLS
+
+
+__locals = locals()
+for symbol in SYMBOLS:
+    __locals[symbol] = APIRemovedInV1Proxy(symbol=symbol)
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/_parsing/__init__.py b/.venv/lib/python3.12/site-packages/openai/lib/_parsing/__init__.py
new file mode 100644
index 00000000..4d454c3a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/_parsing/__init__.py
@@ -0,0 +1,12 @@
+from ._completions import (
+    ResponseFormatT as ResponseFormatT,
+    has_parseable_input as has_parseable_input,
+    maybe_parse_content as maybe_parse_content,
+    validate_input_tools as validate_input_tools,
+    parse_chat_completion as parse_chat_completion,
+    get_input_tool_by_name as get_input_tool_by_name,
+    solve_response_format_t as solve_response_format_t,
+    parse_function_tool_arguments as parse_function_tool_arguments,
+    type_to_response_format_param as type_to_response_format_param,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/_parsing/_completions.py b/.venv/lib/python3.12/site-packages/openai/lib/_parsing/_completions.py
new file mode 100644
index 00000000..c160070b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/_parsing/_completions.py
@@ -0,0 +1,264 @@
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING, Any, Iterable, cast
+from typing_extensions import TypeVar, TypeGuard, assert_never
+
+import pydantic
+
+from .._tools import PydanticFunctionTool
+from ..._types import NOT_GIVEN, NotGiven
+from ..._utils import is_dict, is_given
+from ..._compat import PYDANTIC_V2, model_parse_json
+from ..._models import construct_type_unchecked
+from .._pydantic import is_basemodel_type, to_strict_json_schema, is_dataclass_like_type
+from ...types.chat import (
+    ParsedChoice,
+    ChatCompletion,
+    ParsedFunction,
+    ParsedChatCompletion,
+    ChatCompletionMessage,
+    ParsedFunctionToolCall,
+    ChatCompletionToolParam,
+    ParsedChatCompletionMessage,
+    completion_create_params,
+)
+from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
+from ...types.shared_params import FunctionDefinition
+from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+from ...types.chat.chat_completion_message_tool_call import Function
+
+ResponseFormatT = TypeVar(
+    "ResponseFormatT",
+    # if it isn't given then we don't do any parsing
+    default=None,
+)
+_default_response_format: None = None
+
+
+def validate_input_tools(
+    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> None:
+    if not is_given(tools):
+        return
+
+    for tool in tools:
+        if tool["type"] != "function":
+            raise ValueError(
+                f"Currently only `function` tool types support auto-parsing; Received `{tool['type']}`",
+            )
+
+        strict = tool["function"].get("strict")
+        if strict is not True:
+            raise ValueError(
+                f"`{tool['function']['name']}` is not strict. Only `strict` function tools can be auto-parsed"
+            )
+
+
+def parse_chat_completion(
+    *,
+    response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | NotGiven,
+    input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    chat_completion: ChatCompletion | ParsedChatCompletion[object],
+) -> ParsedChatCompletion[ResponseFormatT]:
+    if is_given(input_tools):
+        input_tools = [t for t in input_tools]
+    else:
+        input_tools = []
+
+    choices: list[ParsedChoice[ResponseFormatT]] = []
+    for choice in chat_completion.choices:
+        if choice.finish_reason == "length":
+            raise LengthFinishReasonError(completion=chat_completion)
+
+        if choice.finish_reason == "content_filter":
+            raise ContentFilterFinishReasonError()
+
+        message = choice.message
+
+        tool_calls: list[ParsedFunctionToolCall] = []
+        if message.tool_calls:
+            for tool_call in message.tool_calls:
+                if tool_call.type == "function":
+                    tool_call_dict = tool_call.to_dict()
+                    tool_calls.append(
+                        construct_type_unchecked(
+                            value={
+                                **tool_call_dict,
+                                "function": {
+                                    **cast(Any, tool_call_dict["function"]),
+                                    "parsed_arguments": parse_function_tool_arguments(
+                                        input_tools=input_tools, function=tool_call.function
+                                    ),
+                                },
+                            },
+                            type_=ParsedFunctionToolCall,
+                        )
+                    )
+                elif TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(tool_call)
+                else:
+                    tool_calls.append(tool_call)
+
+        choices.append(
+            construct_type_unchecked(
+                type_=cast(Any, ParsedChoice)[solve_response_format_t(response_format)],
+                value={
+                    **choice.to_dict(),
+                    "message": {
+                        **message.to_dict(),
+                        "parsed": maybe_parse_content(
+                            response_format=response_format,
+                            message=message,
+                        ),
+                        "tool_calls": tool_calls if tool_calls else None,
+                    },
+                },
+            )
+        )
+
+    return cast(
+        ParsedChatCompletion[ResponseFormatT],
+        construct_type_unchecked(
+            type_=cast(Any, ParsedChatCompletion)[solve_response_format_t(response_format)],
+            value={
+                **chat_completion.to_dict(),
+                "choices": choices,
+            },
+        ),
+    )
+
+
+def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None:
+    return next((t for t in input_tools if t.get("function", {}).get("name") == name), None)
+
+
+def parse_function_tool_arguments(
+    *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction
+) -> object:
+    input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name)
+    if not input_tool:
+        return None
+
+    input_fn = cast(object, input_tool.get("function"))
+    if isinstance(input_fn, PydanticFunctionTool):
+        return model_parse_json(input_fn.model, function.arguments)
+
+    input_fn = cast(FunctionDefinition, input_fn)
+
+    if not input_fn.get("strict"):
+        return None
+
+    return json.loads(function.arguments)
+
+
+def maybe_parse_content(
+    *,
+    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+    message: ChatCompletionMessage | ParsedChatCompletionMessage[object],
+) -> ResponseFormatT | None:
+    if has_rich_response_format(response_format) and message.content and not message.refusal:
+        return _parse_content(response_format, message.content)
+
+    return None
+
+
+def solve_response_format_t(
+    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+) -> type[ResponseFormatT]:
+    """Return the runtime type for the given response format.
+
+    If no response format is given, or if we won't auto-parse the response format
+    then we default to `None`.
+    """
+    if has_rich_response_format(response_format):
+        return response_format
+
+    return cast("type[ResponseFormatT]", _default_response_format)
+
+
+def has_parseable_input(
+    *,
+    response_format: type | ResponseFormatParam | NotGiven,
+    input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> bool:
+    if has_rich_response_format(response_format):
+        return True
+
+    for input_tool in input_tools or []:
+        if is_parseable_tool(input_tool):
+            return True
+
+    return False
+
+
+def has_rich_response_format(
+    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+) -> TypeGuard[type[ResponseFormatT]]:
+    if not is_given(response_format):
+        return False
+
+    if is_response_format_param(response_format):
+        return False
+
+    return True
+
+
+def is_response_format_param(response_format: object) -> TypeGuard[ResponseFormatParam]:
+    return is_dict(response_format)
+
+
+def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool:
+    input_fn = cast(object, input_tool.get("function"))
+    if isinstance(input_fn, PydanticFunctionTool):
+        return True
+
+    return cast(FunctionDefinition, input_fn).get("strict") or False
+
+
+def _parse_content(response_format: type[ResponseFormatT], content: str) -> ResponseFormatT:
+    if is_basemodel_type(response_format):
+        return cast(ResponseFormatT, model_parse_json(response_format, content))
+
+    if is_dataclass_like_type(response_format):
+        if not PYDANTIC_V2:
+            raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {response_format}")
+
+        return pydantic.TypeAdapter(response_format).validate_json(content)
+
+    raise TypeError(f"Unable to automatically parse response format type {response_format}")
+
+
+def type_to_response_format_param(
+    response_format: type | completion_create_params.ResponseFormat | NotGiven,
+) -> ResponseFormatParam | NotGiven:
+    if not is_given(response_format):
+        return NOT_GIVEN
+
+    if is_response_format_param(response_format):
+        return response_format
+
+    # type checkers don't narrow the negation of a `TypeGuard` as it isn't
+    # a safe default behaviour but we know that at this point the `response_format`
+    # can only be a `type`
+    response_format = cast(type, response_format)
+
+    json_schema_type: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any] | None = None
+
+    if is_basemodel_type(response_format):
+        name = response_format.__name__
+        json_schema_type = response_format
+    elif is_dataclass_like_type(response_format):
+        name = response_format.__name__
+        json_schema_type = pydantic.TypeAdapter(response_format)
+    else:
+        raise TypeError(f"Unsupported response_format type - {response_format}")
+
+    return {
+        "type": "json_schema",
+        "json_schema": {
+            "schema": to_strict_json_schema(json_schema_type),
+            "name": name,
+            "strict": True,
+        },
+    }
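
Usage sketch (not part of the vendored file above; the `Weather` model is illustrative): `type_to_response_format_param` turns a Pydantic model into the strict `json_schema` response-format param that the rest of this module parses completions against.

    import pydantic

    from openai.lib._parsing import type_to_response_format_param

    class Weather(pydantic.BaseModel):
        city: str
        temperature_c: float

    param = type_to_response_format_param(Weather)
    # param == {
    #     "type": "json_schema",
    #     "json_schema": {
    #         "schema": {...},   # strict schema: every property required, additionalProperties False
    #         "name": "Weather",
    #         "strict": True,
    #     },
    # }
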
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/_parsing/_responses.py b/.venv/lib/python3.12/site-packages/openai/lib/_parsing/_responses.py
new file mode 100644
index 00000000..a189dcf9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/_parsing/_responses.py
@@ -0,0 +1,168 @@
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING, Any, List, Iterable, cast
+from typing_extensions import TypeVar, assert_never
+
+import pydantic
+
+from .._tools import ResponsesPydanticFunctionTool
+from ..._types import NotGiven
+from ..._utils import is_given
+from ..._compat import PYDANTIC_V2, model_parse_json
+from ..._models import construct_type_unchecked
+from .._pydantic import is_basemodel_type, is_dataclass_like_type
+from ._completions import solve_response_format_t, type_to_response_format_param
+from ...types.responses import (
+    Response,
+    ToolParam,
+    ParsedContent,
+    ParsedResponse,
+    FunctionToolParam,
+    ParsedResponseOutputItem,
+    ParsedResponseOutputText,
+    ResponseFunctionToolCall,
+    ParsedResponseOutputMessage,
+    ResponseFormatTextConfigParam,
+    ParsedResponseFunctionToolCall,
+)
+from ...types.chat.completion_create_params import ResponseFormat
+
+TextFormatT = TypeVar(
+    "TextFormatT",
+    # if it isn't given then we don't do any parsing
+    default=None,
+)
+
+
+def type_to_text_format_param(type_: type) -> ResponseFormatTextConfigParam:
+    response_format_dict = type_to_response_format_param(type_)
+    assert is_given(response_format_dict)
+    response_format_dict = cast(ResponseFormat, response_format_dict)  # pyright: ignore[reportUnnecessaryCast]
+    assert response_format_dict["type"] == "json_schema"
+    assert "schema" in response_format_dict["json_schema"]
+
+    return {
+        "type": "json_schema",
+        "strict": True,
+        "name": response_format_dict["json_schema"]["name"],
+        "schema": response_format_dict["json_schema"]["schema"],
+    }
+
+
+def parse_response(
+    *,
+    text_format: type[TextFormatT] | NotGiven,
+    input_tools: Iterable[ToolParam] | NotGiven | None,
+    response: Response | ParsedResponse[object],
+) -> ParsedResponse[TextFormatT]:
+    solved_t = solve_response_format_t(text_format)
+    output_list: List[ParsedResponseOutputItem[TextFormatT]] = []
+
+    for output in response.output:
+        if output.type == "message":
+            content_list: List[ParsedContent[TextFormatT]] = []
+            for item in output.content:
+                if item.type != "output_text":
+                    content_list.append(item)
+                    continue
+
+                content_list.append(
+                    construct_type_unchecked(
+                        type_=cast(Any, ParsedResponseOutputText)[solved_t],
+                        value={
+                            **item.to_dict(),
+                            "parsed": parse_text(item.text, text_format=text_format),
+                        },
+                    )
+                )
+
+            output_list.append(
+                construct_type_unchecked(
+                    type_=cast(Any, ParsedResponseOutputMessage)[solved_t],
+                    value={
+                        **output.to_dict(),
+                        "content": content_list,
+                    },
+                )
+            )
+        elif output.type == "function_call":
+            output_list.append(
+                construct_type_unchecked(
+                    type_=ParsedResponseFunctionToolCall,
+                    value={
+                        **output.to_dict(),
+                        "parsed_arguments": parse_function_tool_arguments(
+                            input_tools=input_tools, function_call=output
+                        ),
+                    },
+                )
+            )
+        elif (
+            output.type == "computer_call"
+            or output.type == "file_search_call"
+            or output.type == "web_search_call"
+            or output.type == "reasoning"
+        ):
+            output_list.append(output)
+        elif TYPE_CHECKING:  # type: ignore
+            assert_never(output)
+        else:
+            output_list.append(output)
+
+    return cast(
+        ParsedResponse[TextFormatT],
+        construct_type_unchecked(
+            type_=cast(Any, ParsedResponse)[solved_t],
+            value={
+                **response.to_dict(),
+                "output": output_list,
+            },
+        ),
+    )
+
+
+def parse_text(text: str, text_format: type[TextFormatT] | NotGiven) -> TextFormatT | None:
+    if not is_given(text_format):
+        return None
+
+    if is_basemodel_type(text_format):
+        return cast(TextFormatT, model_parse_json(text_format, text))
+
+    if is_dataclass_like_type(text_format):
+        if not PYDANTIC_V2:
+            raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {text_format}")
+
+        return pydantic.TypeAdapter(text_format).validate_json(text)
+
+    raise TypeError(f"Unable to automatically parse response format type {text_format}")
+
+
+def get_input_tool_by_name(*, input_tools: Iterable[ToolParam], name: str) -> FunctionToolParam | None:
+    for tool in input_tools:
+        if tool["type"] == "function" and tool.get("name") == name:
+            return tool
+
+    return None
+
+
+def parse_function_tool_arguments(
+    *,
+    input_tools: Iterable[ToolParam] | NotGiven | None,
+    function_call: ParsedResponseFunctionToolCall | ResponseFunctionToolCall,
+) -> object:
+    if input_tools is None or not is_given(input_tools):
+        return None
+
+    input_tool = get_input_tool_by_name(input_tools=input_tools, name=function_call.name)
+    if not input_tool:
+        return None
+
+    tool = cast(object, input_tool)
+    if isinstance(tool, ResponsesPydanticFunctionTool):
+        return model_parse_json(tool.model, function_call.arguments)
+
+    if not input_tool.get("strict"):
+        return None
+
+    return json.loads(function_call.arguments)
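
Usage sketch (not part of the vendored file above; the `Address` model and the JSON string are illustrative): `type_to_text_format_param` produces the Responses-style text format param, and `parse_text` validates raw output text into the requested type.

    import pydantic

    from openai.lib._parsing._responses import parse_text, type_to_text_format_param

    class Address(pydantic.BaseModel):
        street: str
        city: str

    fmt = type_to_text_format_param(Address)
    # {"type": "json_schema", "strict": True, "name": "Address", "schema": {...}}

    parsed = parse_text('{"street": "1 Main St", "city": "Springfield"}', text_format=Address)
    assert isinstance(parsed, Address)
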
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/_pydantic.py b/.venv/lib/python3.12/site-packages/openai/lib/_pydantic.py
new file mode 100644
index 00000000..c2d73e5f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/_pydantic.py
@@ -0,0 +1,155 @@
+from __future__ import annotations
+
+import inspect
+from typing import Any, TypeVar
+from typing_extensions import TypeGuard
+
+import pydantic
+
+from .._types import NOT_GIVEN
+from .._utils import is_dict as _is_dict, is_list
+from .._compat import PYDANTIC_V2, model_json_schema
+
+_T = TypeVar("_T")
+
+
+def to_strict_json_schema(model: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any]) -> dict[str, Any]:
+    if inspect.isclass(model) and is_basemodel_type(model):
+        schema = model_json_schema(model)
+    elif PYDANTIC_V2 and isinstance(model, pydantic.TypeAdapter):
+        schema = model.json_schema()
+    else:
+        raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {model}")
+
+    return _ensure_strict_json_schema(schema, path=(), root=schema)
+
+
+def _ensure_strict_json_schema(
+    json_schema: object,
+    *,
+    path: tuple[str, ...],
+    root: dict[str, object],
+) -> dict[str, Any]:
+    """Mutates the given JSON schema to ensure it conforms to the `strict` standard
+    that the API expects.
+    """
+    if not is_dict(json_schema):
+        raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")
+
+    defs = json_schema.get("$defs")
+    if is_dict(defs):
+        for def_name, def_schema in defs.items():
+            _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name), root=root)
+
+    definitions = json_schema.get("definitions")
+    if is_dict(definitions):
+        for definition_name, definition_schema in definitions.items():
+            _ensure_strict_json_schema(definition_schema, path=(*path, "definitions", definition_name), root=root)
+
+    typ = json_schema.get("type")
+    if typ == "object" and "additionalProperties" not in json_schema:
+        json_schema["additionalProperties"] = False
+
+    # object types
+    # { 'type': 'object', 'properties': { 'a':  {...} } }
+    properties = json_schema.get("properties")
+    if is_dict(properties):
+        json_schema["required"] = [prop for prop in properties.keys()]
+        json_schema["properties"] = {
+            key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key), root=root)
+            for key, prop_schema in properties.items()
+        }
+
+    # arrays
+    # { 'type': 'array', 'items': {...} }
+    items = json_schema.get("items")
+    if is_dict(items):
+        json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"), root=root)
+
+    # unions
+    any_of = json_schema.get("anyOf")
+    if is_list(any_of):
+        json_schema["anyOf"] = [
+            _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i)), root=root)
+            for i, variant in enumerate(any_of)
+        ]
+
+    # intersections
+    all_of = json_schema.get("allOf")
+    if is_list(all_of):
+        if len(all_of) == 1:
+            json_schema.update(_ensure_strict_json_schema(all_of[0], path=(*path, "allOf", "0"), root=root))
+            json_schema.pop("allOf")
+        else:
+            json_schema["allOf"] = [
+                _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i)), root=root)
+                for i, entry in enumerate(all_of)
+            ]
+
+    # strip `None` defaults as there's no meaningful distinction here
+    # the schema will still be `nullable` and the model will default
+    # to using `None` anyway
+    if json_schema.get("default", NOT_GIVEN) is None:
+        json_schema.pop("default")
+
+    # we can't use `$ref`s if there are also other properties defined, e.g.
+    # `{"$ref": "...", "description": "my description"}`
+    #
+    # so we unravel the ref
+    # `{"type": "string", "description": "my description"}`
+    ref = json_schema.get("$ref")
+    if ref and has_more_than_n_keys(json_schema, 1):
+        assert isinstance(ref, str), f"Received non-string $ref - {ref}"
+
+        resolved = resolve_ref(root=root, ref=ref)
+        if not is_dict(resolved):
+            raise ValueError(f"Expected `$ref: {ref}` to resolve to a dictionary but got {resolved}")
+
+        # properties from the json schema take priority over the ones on the `$ref`
+        json_schema.update({**resolved, **json_schema})
+        json_schema.pop("$ref")
+        # Since the schema expanded from `$ref` might not have `additionalProperties: false` applied,
+        # we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid.
+        return _ensure_strict_json_schema(json_schema, path=path, root=root)
+
+    return json_schema
+
+
+def resolve_ref(*, root: dict[str, object], ref: str) -> object:
+    if not ref.startswith("#/"):
+        raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/")
+
+    path = ref[2:].split("/")
+    resolved = root
+    for key in path:
+        value = resolved[key]
+        assert is_dict(value), f"encountered non-dictionary entry while resolving {ref} - {resolved}"
+        resolved = value
+
+    return resolved
+
+
+def is_basemodel_type(typ: type) -> TypeGuard[type[pydantic.BaseModel]]:
+    if not inspect.isclass(typ):
+        return False
+    return issubclass(typ, pydantic.BaseModel)
+
+
+def is_dataclass_like_type(typ: type) -> bool:
+    """Returns True if the given type likely used `@pydantic.dataclass`"""
+    return hasattr(typ, "__pydantic_config__")
+
+
+def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
+    # just pretend that we know there are only `str` keys
+    # as that check is not worth the performance cost
+    return _is_dict(obj)
+
+
+def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:
+    i = 0
+    for _ in obj.keys():
+        i += 1
+        if i > n:
+            return True
+    return False
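
Usage sketch (not part of the vendored file above; the `Item` model is illustrative and Pydantic v2 is assumed): `to_strict_json_schema` rewrites the model's schema so that every property is required and extra properties are disallowed, as the API's strict mode expects.

    from typing import Optional

    import pydantic

    from openai.lib._pydantic import to_strict_json_schema

    class Item(pydantic.BaseModel):
        name: str
        price: Optional[float] = None

    schema = to_strict_json_schema(Item)
    assert schema["additionalProperties"] is False   # added for object types
    assert schema["required"] == ["name", "price"]   # every property is made required
    # the `None` default on `price` is stripped; the field stays nullable via anyOf
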
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/_tools.py b/.venv/lib/python3.12/site-packages/openai/lib/_tools.py
new file mode 100644
index 00000000..415d7500
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/_tools.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from typing import Any, Dict, cast
+
+import pydantic
+
+from ._pydantic import to_strict_json_schema
+from ..types.chat import ChatCompletionToolParam
+from ..types.shared_params import FunctionDefinition
+from ..types.responses.function_tool_param import FunctionToolParam as ResponsesFunctionToolParam
+
+
+class PydanticFunctionTool(Dict[str, Any]):
+    """Dictionary wrapper so we can pass the given base model
+    throughout the entire request stack without having to special
+    case it.
+    """
+
+    model: type[pydantic.BaseModel]
+
+    def __init__(self, defn: FunctionDefinition, model: type[pydantic.BaseModel]) -> None:
+        super().__init__(defn)
+        self.model = model
+
+    def cast(self) -> FunctionDefinition:
+        return cast(FunctionDefinition, self)
+
+
+class ResponsesPydanticFunctionTool(Dict[str, Any]):
+    model: type[pydantic.BaseModel]
+
+    def __init__(self, tool: ResponsesFunctionToolParam, model: type[pydantic.BaseModel]) -> None:
+        super().__init__(tool)
+        self.model = model
+
+    def cast(self) -> ResponsesFunctionToolParam:
+        return cast(ResponsesFunctionToolParam, self)
+
+
+def pydantic_function_tool(
+    model: type[pydantic.BaseModel],
+    *,
+    name: str | None = None,  # inferred from class name by default
+    description: str | None = None,  # inferred from class docstring by default
+) -> ChatCompletionToolParam:
+    if description is None:
+        # note: we intentionally don't use `.getdoc()` to avoid
+        # including pydantic's docstrings
+        description = model.__doc__
+
+    function = PydanticFunctionTool(
+        {
+            "name": name or model.__name__,
+            "strict": True,
+            "parameters": to_strict_json_schema(model),
+        },
+        model,
+    ).cast()
+
+    if description is not None:
+        function["description"] = description
+
+    return {
+        "type": "function",
+        "function": function,
+    }
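
Usage sketch (not part of the vendored file above; the `GetWeather` model is illustrative): `pydantic_function_tool` builds a strict `function` tool param whose name and description are inferred from the class. The `function` value is the `PydanticFunctionTool` dict subclass, which is how the parsing helpers earlier in this diff recover the original model when parsing tool-call arguments.

    import pydantic

    from openai.lib._tools import pydantic_function_tool

    class GetWeather(pydantic.BaseModel):
        """Look up the current weather for a city."""

        city: str

    tool = pydantic_function_tool(GetWeather)
    # {
    #     "type": "function",
    #     "function": {
    #         "name": "GetWeather",            # inferred from the class name
    #         "strict": True,
    #         "parameters": {...},             # strict JSON schema for GetWeather
    #         "description": "Look up the current weather for a city.",  # from the docstring
    #     },
    # }
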
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/_validators.py b/.venv/lib/python3.12/site-packages/openai/lib/_validators.py
new file mode 100644
index 00000000..cf24cd22
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/_validators.py
@@ -0,0 +1,809 @@
+# pyright: basic
+from __future__ import annotations
+
+import os
+import sys
+from typing import Any, TypeVar, Callable, Optional, NamedTuple
+from typing_extensions import TypeAlias
+
+from .._extras import pandas as pd
+
+
+class Remediation(NamedTuple):
+    name: str
+    immediate_msg: Optional[str] = None
+    necessary_msg: Optional[str] = None
+    necessary_fn: Optional[Callable[[Any], Any]] = None
+    optional_msg: Optional[str] = None
+    optional_fn: Optional[Callable[[Any], Any]] = None
+    error_msg: Optional[str] = None
+
+
+OptionalDataFrameT = TypeVar("OptionalDataFrameT", bound="Optional[pd.DataFrame]")
+
+
+def num_examples_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will only print out the number of examples and recommend increasing the number of examples if there are fewer than 100.
+    """
+    MIN_EXAMPLES = 100
+    optional_suggestion = (
+        ""
+        if len(df) >= MIN_EXAMPLES
+        else ". In general, we recommend having at least a few hundred examples. We've found that performance tends to linearly increase for every doubling of the number of examples"
+    )
+    immediate_msg = f"\n- Your file contains {len(df)} prompt-completion pairs{optional_suggestion}"
+    return Remediation(name="num_examples", immediate_msg=immediate_msg)
+
+
+def necessary_column_validator(df: pd.DataFrame, necessary_column: str) -> Remediation:
+    """
+    This validator will ensure that the necessary column is present in the dataframe.
+    """
+
+    def lower_case_column(df: pd.DataFrame, column: Any) -> pd.DataFrame:
+        cols = [c for c in df.columns if str(c).lower() == column]
+        df.rename(columns={cols[0]: column.lower()}, inplace=True)
+        return df
+
+    immediate_msg = None
+    necessary_fn = None
+    necessary_msg = None
+    error_msg = None
+
+    if necessary_column not in df.columns:
+        if necessary_column in [str(c).lower() for c in df.columns]:
+
+            def lower_case_column_creator(df: pd.DataFrame) -> pd.DataFrame:
+                return lower_case_column(df, necessary_column)
+
+            necessary_fn = lower_case_column_creator
+            immediate_msg = f"\n- The `{necessary_column}` column/key should be lowercase"
+            necessary_msg = f"Lower case column name to `{necessary_column}`"
+        else:
+            error_msg = f"`{necessary_column}` column/key is missing. Please make sure you name your columns/keys appropriately, then retry"
+
+    return Remediation(
+        name="necessary_column",
+        immediate_msg=immediate_msg,
+        necessary_msg=necessary_msg,
+        necessary_fn=necessary_fn,
+        error_msg=error_msg,
+    )
+
+
+def additional_column_validator(df: pd.DataFrame, fields: list[str] = ["prompt", "completion"]) -> Remediation:
+    """
+    This validator will remove additional columns from the dataframe.
+    """
+    additional_columns = []
+    necessary_msg = None
+    immediate_msg = None
+    necessary_fn = None  # type: ignore
+
+    if len(df.columns) > 2:
+        additional_columns = [c for c in df.columns if c not in fields]
+        warn_message = ""
+        for ac in additional_columns:
+            dups = [c for c in additional_columns if ac in c]
+            if len(dups) > 0:
+                warn_message += f"\n  WARNING: Some of the additional columns/keys contain `{ac}` in their name. These will be ignored, and the column/key `{ac}` will be used instead. This could also result from a duplicate column/key in the provided file."
+        immediate_msg = f"\n- The input file should contain exactly two columns/keys per row. Additional columns/keys present are: {additional_columns}{warn_message}"
+        necessary_msg = f"Remove additional columns/keys: {additional_columns}"
+
+        def necessary_fn(x: Any) -> Any:
+            return x[fields]
+
+    return Remediation(
+        name="additional_column",
+        immediate_msg=immediate_msg,
+        necessary_msg=necessary_msg,
+        necessary_fn=necessary_fn,
+    )
+
+
+def non_empty_field_validator(df: pd.DataFrame, field: str = "completion") -> Remediation:
+    """
+    This validator will ensure that no completion is empty.
+    """
+    necessary_msg = None
+    necessary_fn = None  # type: ignore
+    immediate_msg = None
+
+    if df[field].apply(lambda x: x == "").any() or df[field].isnull().any():
+        empty_rows = (df[field] == "") | (df[field].isnull())
+        empty_indexes = df.reset_index().index[empty_rows].tolist()
+        immediate_msg = f"\n- `{field}` column/key should not contain empty strings. These are rows: {empty_indexes}"
+
+        def necessary_fn(x: Any) -> Any:
+            return x[x[field] != ""].dropna(subset=[field])
+
+        necessary_msg = f"Remove {len(empty_indexes)} rows with empty {field}s"
+
+    return Remediation(
+        name=f"empty_{field}",
+        immediate_msg=immediate_msg,
+        necessary_msg=necessary_msg,
+        necessary_fn=necessary_fn,
+    )
+
+
+def duplicated_rows_validator(df: pd.DataFrame, fields: list[str] = ["prompt", "completion"]) -> Remediation:
+    """
+    This validator will suggest to the user to remove duplicate rows if they exist.
+    """
+    duplicated_rows = df.duplicated(subset=fields)
+    duplicated_indexes = df.reset_index().index[duplicated_rows].tolist()
+    immediate_msg = None
+    optional_msg = None
+    optional_fn = None  # type: ignore
+
+    if len(duplicated_indexes) > 0:
+        immediate_msg = f"\n- There are {len(duplicated_indexes)} duplicated {'-'.join(fields)} sets. These are rows: {duplicated_indexes}"
+        optional_msg = f"Remove {len(duplicated_indexes)} duplicate rows"
+
+        def optional_fn(x: Any) -> Any:
+            return x.drop_duplicates(subset=fields)
+
+    return Remediation(
+        name="duplicated_rows",
+        immediate_msg=immediate_msg,
+        optional_msg=optional_msg,
+        optional_fn=optional_fn,
+    )
+
+
+def long_examples_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will suggest to the user to remove examples that are too long.
+    """
+    immediate_msg = None
+    optional_msg = None
+    optional_fn = None  # type: ignore
+
+    ft_type = infer_task_type(df)
+    if ft_type != "open-ended generation":
+
+        def get_long_indexes(d: pd.DataFrame) -> Any:
+            long_examples = d.apply(lambda x: len(x.prompt) + len(x.completion) > 10000, axis=1)
+            return d.reset_index().index[long_examples].tolist()
+
+        long_indexes = get_long_indexes(df)
+
+        if len(long_indexes) > 0:
+            immediate_msg = f"\n- There are {len(long_indexes)} examples that are very long. These are rows: {long_indexes}\nFor conditional generation, and for classification the examples shouldn't be longer than 2048 tokens."
+            optional_msg = f"Remove {len(long_indexes)} long examples"
+
+            def optional_fn(x: Any) -> Any:
+                long_indexes_to_drop = get_long_indexes(x)
+                if long_indexes != long_indexes_to_drop:
+                    sys.stdout.write(
+                        f"The indices of the long examples have changed as a result of a previously applied recommendation.\nThe {len(long_indexes_to_drop)} long examples to be dropped are now at the following indices: {long_indexes_to_drop}\n"
+                    )
+                return x.drop(long_indexes_to_drop)
+
+    return Remediation(
+        name="long_examples",
+        immediate_msg=immediate_msg,
+        optional_msg=optional_msg,
+        optional_fn=optional_fn,
+    )
+
+
+def common_prompt_suffix_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will suggest to add a common suffix to the prompt if one doesn't already exist in case of classification or conditional generation.
+    """
+    error_msg = None
+    immediate_msg = None
+    optional_msg = None
+    optional_fn = None  # type: ignore
+
+    # Find a suffix which is not contained within the prompt otherwise
+    suggested_suffix = "\n\n### =>\n\n"
+    suffix_options = [
+        " ->",
+        "\n\n###\n\n",
+        "\n\n===\n\n",
+        "\n\n---\n\n",
+        "\n\n===>\n\n",
+        "\n\n--->\n\n",
+    ]
+    for suffix_option in suffix_options:
+        if suffix_option == " ->":
+            if df.prompt.str.contains("\n").any():
+                continue
+        if df.prompt.str.contains(suffix_option, regex=False).any():
+            continue
+        suggested_suffix = suffix_option
+        break
+    display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
+
+    ft_type = infer_task_type(df)
+    if ft_type == "open-ended generation":
+        return Remediation(name="common_suffix")
+
+    def add_suffix(x: Any, suffix: Any) -> Any:
+        x["prompt"] += suffix
+        return x
+
+    common_suffix = get_common_xfix(df.prompt, xfix="suffix")
+    if (df.prompt == common_suffix).all():
+        error_msg = f"All prompts are identical: `{common_suffix}`\nConsider leaving the prompts blank if you want to do open-ended generation, otherwise ensure prompts are different"
+        return Remediation(name="common_suffix", error_msg=error_msg)
+
+    if common_suffix != "":
+        common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
+        immediate_msg = f"\n- All prompts end with suffix `{common_suffix_new_line_handled}`"
+        if len(common_suffix) > 10:
+            immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
+        if df.prompt.str[: -len(common_suffix)].str.contains(common_suffix, regex=False).any():
+            immediate_msg += f"\n  WARNING: Some of your prompts contain the suffix `{common_suffix}` more than once. We strongly suggest that you review your prompts and add a unique suffix"
+
+    else:
+        immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. If you intend to do open-ended generation, then you should leave the prompts empty"
+
+    if common_suffix == "":
+        optional_msg = f"Add a suffix separator `{display_suggested_suffix}` to all prompts"
+
+        def optional_fn(x: Any) -> Any:
+            return add_suffix(x, suggested_suffix)
+
+    return Remediation(
+        name="common_completion_suffix",
+        immediate_msg=immediate_msg,
+        optional_msg=optional_msg,
+        optional_fn=optional_fn,
+        error_msg=error_msg,
+    )
+
+
+def common_prompt_prefix_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will suggest removing a common prefix from the prompt if a long one exists.
+    """
+    MAX_PREFIX_LEN = 12
+
+    immediate_msg = None
+    optional_msg = None
+    optional_fn = None  # type: ignore
+
+    common_prefix = get_common_xfix(df.prompt, xfix="prefix")
+    if common_prefix == "":
+        return Remediation(name="common_prefix")
+
+    def remove_common_prefix(x: Any, prefix: Any) -> Any:
+        x["prompt"] = x["prompt"].str[len(prefix) :]
+        return x
+
+    if (df.prompt == common_prefix).all():
+        # already handled by common_suffix_validator
+        return Remediation(name="common_prefix")
+
+    if common_prefix != "":
+        immediate_msg = f"\n- All prompts start with prefix `{common_prefix}`"
+        if MAX_PREFIX_LEN < len(common_prefix):
+            immediate_msg += ". Fine-tuning doesn't require the instruction specifying the task, or a few-shot example scenario. Most of the time you should only add the input data into the prompt, and the desired output into the completion"
+            optional_msg = f"Remove prefix `{common_prefix}` from all prompts"
+
+            def optional_fn(x: Any) -> Any:
+                return remove_common_prefix(x, common_prefix)
+
+    return Remediation(
+        name="common_prompt_prefix",
+        immediate_msg=immediate_msg,
+        optional_msg=optional_msg,
+        optional_fn=optional_fn,
+    )
+
+
+def common_completion_prefix_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will suggest removing a common prefix from the completion if a long one exists.
+    """
+    MAX_PREFIX_LEN = 5
+
+    common_prefix = get_common_xfix(df.completion, xfix="prefix")
+    ws_prefix = len(common_prefix) > 0 and common_prefix[0] == " "
+    if len(common_prefix) < MAX_PREFIX_LEN:
+        return Remediation(name="common_prefix")
+
+    def remove_common_prefix(x: Any, prefix: Any, ws_prefix: Any) -> Any:
+        x["completion"] = x["completion"].str[len(prefix) :]
+        if ws_prefix:
+            # keep the single whitespace as prefix
+            x["completion"] = f" {x['completion']}"
+        return x
+
+    if (df.completion == common_prefix).all():
+        # already handled by common_suffix_validator
+        return Remediation(name="common_prefix")
+
+    immediate_msg = f"\n- All completions start with prefix `{common_prefix}`. Most of the time you should only add the output data into the completion, without any prefix"
+    optional_msg = f"Remove prefix `{common_prefix}` from all completions"
+
+    def optional_fn(x: Any) -> Any:
+        return remove_common_prefix(x, common_prefix, ws_prefix)
+
+    return Remediation(
+        name="common_completion_prefix",
+        immediate_msg=immediate_msg,
+        optional_msg=optional_msg,
+        optional_fn=optional_fn,
+    )
+
+
+def common_completion_suffix_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will suggest to add a common suffix to the completion if one doesn't already exist in case of classification or conditional generation.
+    """
+    error_msg = None
+    immediate_msg = None
+    optional_msg = None
+    optional_fn = None  # type: ignore
+
+    ft_type = infer_task_type(df)
+    if ft_type == "open-ended generation" or ft_type == "classification":
+        return Remediation(name="common_suffix")
+
+    common_suffix = get_common_xfix(df.completion, xfix="suffix")
+    if (df.completion == common_suffix).all():
+        error_msg = f"All completions are identical: `{common_suffix}`\nEnsure completions are different, otherwise the model will just repeat `{common_suffix}`"
+        return Remediation(name="common_suffix", error_msg=error_msg)
+
+    # Find a suffix which is not contained within the completion otherwise
+    suggested_suffix = " [END]"
+    suffix_options = [
+        "\n",
+        ".",
+        " END",
+        "***",
+        "+++",
+        "&&&",
+        "$$$",
+        "@@@",
+        "%%%",
+    ]
+    for suffix_option in suffix_options:
+        if df.completion.str.contains(suffix_option, regex=False).any():
+            continue
+        suggested_suffix = suffix_option
+        break
+    display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
+
+    def add_suffix(x: Any, suffix: Any) -> Any:
+        x["completion"] += suffix
+        return x
+
+    if common_suffix != "":
+        common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
+        immediate_msg = f"\n- All completions end with suffix `{common_suffix_new_line_handled}`"
+        if len(common_suffix) > 10:
+            immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
+        if df.completion.str[: -len(common_suffix)].str.contains(common_suffix, regex=False).any():
+            immediate_msg += f"\n  WARNING: Some of your completions contain the suffix `{common_suffix}` more than once. We suggest that you review your completions and add a unique ending"
+
+    else:
+        immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples."
+
+    if common_suffix == "":
+        optional_msg = f"Add a suffix ending `{display_suggested_suffix}` to all completions"
+
+        def optional_fn(x: Any) -> Any:
+            return add_suffix(x, suggested_suffix)
+
+    return Remediation(
+        name="common_completion_suffix",
+        immediate_msg=immediate_msg,
+        optional_msg=optional_msg,
+        optional_fn=optional_fn,
+        error_msg=error_msg,
+    )
+
+
+def completions_space_start_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will suggest to add a space at the start of the completion if it doesn't already exist. This helps with tokenization.
+    """
+
+    def add_space_start(x: Any) -> Any:
+        x["completion"] = x["completion"].apply(lambda s: ("" if s.startswith(" ") else " ") + s)
+        return x
+
+    optional_msg = None
+    optional_fn = None
+    immediate_msg = None
+
+    if df.completion.str[:1].nunique() != 1 or df.completion.values[0][0] != " ":
+        immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details"
+        optional_msg = "Add a whitespace character to the beginning of the completion"
+        optional_fn = add_space_start
+    return Remediation(
+        name="completion_space_start",
+        immediate_msg=immediate_msg,
+        optional_msg=optional_msg,
+        optional_fn=optional_fn,
+    )
+
+
+def lower_case_validator(df: pd.DataFrame, column: Any) -> Remediation | None:
+    """
+    This validator will suggest to lowercase the column values, if more than a third of letters are uppercase.
+    """
+
+    def lower_case(x: Any) -> Any:
+        x[column] = x[column].str.lower()
+        return x
+
+    count_upper = df[column].apply(lambda x: sum(1 for c in x if c.isalpha() and c.isupper())).sum()
+    count_lower = df[column].apply(lambda x: sum(1 for c in x if c.isalpha() and c.islower())).sum()
+
+    if count_upper * 2 > count_lower:
+        return Remediation(
+            name="lower_case",
+            immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details",
+            optional_msg=f"Lowercase all your data in column/key `{column}`",
+            optional_fn=lower_case,
+        )
+    return None
+
+
+def read_any_format(
+    fname: str, fields: list[str] = ["prompt", "completion"]
+) -> tuple[pd.DataFrame | None, Remediation]:
+    """
+    This function will read a file saved in .csv, .json, .txt, .xlsx or .tsv format using pandas.
+     - for .xlsx it will read the first sheet
+     - for .txt it will assume completions and split on newline
+    """
+    remediation = None
+    necessary_msg = None
+    immediate_msg = None
+    error_msg = None
+    df = None
+
+    if os.path.isfile(fname):
+        try:
+            if fname.lower().endswith(".csv") or fname.lower().endswith(".tsv"):
+                file_extension_str, separator = ("CSV", ",") if fname.lower().endswith(".csv") else ("TSV", "\t")
+                immediate_msg = (
+                    f"\n- Based on your file extension, your file is formatted as a {file_extension_str} file"
+                )
+                necessary_msg = f"Your format `{file_extension_str}` will be converted to `JSONL`"
+                df = pd.read_csv(fname, sep=separator, dtype=str).fillna("")
+            elif fname.lower().endswith(".xlsx"):
+                immediate_msg = "\n- Based on your file extension, your file is formatted as an Excel file"
+                necessary_msg = "Your format `XLSX` will be converted to `JSONL`"
+                xls = pd.ExcelFile(fname)
+                sheets = xls.sheet_names
+                if len(sheets) > 1:
+                    immediate_msg += "\n- Your Excel file contains more than one sheet. Please either save as csv or ensure all data is present in the first sheet. WARNING: Reading only the first sheet..."
+                df = pd.read_excel(fname, dtype=str).fillna("")
+            elif fname.lower().endswith(".txt"):
+                immediate_msg = "\n- Based on your file extension, you provided a text file"
+                necessary_msg = "Your format `TXT` will be converted to `JSONL`"
+                with open(fname, "r") as f:
+                    content = f.read()
+                    df = pd.DataFrame(
+                        [["", line] for line in content.split("\n")],
+                        columns=fields,
+                        dtype=str,
+                    ).fillna("")
+            elif fname.lower().endswith(".jsonl"):
+                df = pd.read_json(fname, lines=True, dtype=str).fillna("")  # type: ignore
+                if len(df) == 1:  # type: ignore
+                    # this is NOT what we expect for a .jsonl file
+                    immediate_msg = "\n- Your JSONL file appears to be in a JSON format. Your file will be converted to JSONL format"
+                    necessary_msg = "Your format `JSON` will be converted to `JSONL`"
+                    df = pd.read_json(fname, dtype=str).fillna("")  # type: ignore
+                else:
+                    pass  # this is what we expect for a .jsonl file
+            elif fname.lower().endswith(".json"):
+                try:
+                    # to handle case where .json file is actually a .jsonl file
+                    df = pd.read_json(fname, lines=True, dtype=str).fillna("")  # type: ignore
+                    if len(df) == 1:  # type: ignore
+                        # this code path corresponds to a .json file that has one line
+                        df = pd.read_json(fname, dtype=str).fillna("")  # type: ignore
+                    else:
+                        # this is NOT what we expect for a .json file
+                        immediate_msg = "\n- Your JSON file appears to be in a JSONL format. Your file will be converted to JSONL format"
+                        necessary_msg = "Your format `JSON` will be converted to `JSONL`"
+                except ValueError:
+                    # this code path corresponds to a .json file that has multiple lines (i.e. it is indented)
+                    df = pd.read_json(fname, dtype=str).fillna("")  # type: ignore
+            else:
+                error_msg = (
+                    "Your file must have one of the following extensions: .CSV, .TSV, .XLSX, .TXT, .JSON or .JSONL"
+                )
+                if "." in fname:
+                    error_msg += f" Your file `{fname}` ends with the extension `.{fname.split('.')[-1]}` which is not supported."
+                else:
+                    error_msg += f" Your file `{fname}` is missing a file extension."
+
+        except (ValueError, TypeError):
+            file_extension_str = fname.split(".")[-1].upper()
+            error_msg = f"Your file `{fname}` does not appear to be in valid {file_extension_str} format. Please ensure your file is formatted as a valid {file_extension_str} file."
+
+    else:
+        error_msg = f"File {fname} does not exist."
+
+    remediation = Remediation(
+        name="read_any_format",
+        necessary_msg=necessary_msg,
+        immediate_msg=immediate_msg,
+        error_msg=error_msg,
+    )
+    return df, remediation
+
+
+def format_inferrer_validator(df: pd.DataFrame) -> Remediation:
+    """
+    This validator will infer the likely fine-tuning format of the data, and display it to the user if it is classification.
+    It will also suggest to use ada and explain train/validation split benefits.
+    """
+    ft_type = infer_task_type(df)
+    immediate_msg = None
+    if ft_type == "classification":
+        immediate_msg = f"\n- Based on your data it seems like you're trying to fine-tune a model for {ft_type}\n- For classification, we recommend you try one of the faster and cheaper models, such as `ada`\n- For classification, you can estimate the expected model performance by keeping a held out dataset, which is not used for training"
+    return Remediation(name="num_examples", immediate_msg=immediate_msg)
+
+
+def apply_necessary_remediation(df: OptionalDataFrameT, remediation: Remediation) -> OptionalDataFrameT:
+    """
+    This function will apply a necessary remediation to a dataframe, or print an error message if one exists.
+    """
+    if remediation.error_msg is not None:
+        sys.stderr.write(f"\n\nERROR in {remediation.name} validator: {remediation.error_msg}\n\nAborting...")
+        sys.exit(1)
+    if remediation.immediate_msg is not None:
+        sys.stdout.write(remediation.immediate_msg)
+    if remediation.necessary_fn is not None:
+        df = remediation.necessary_fn(df)
+    return df
+
+
+def accept_suggestion(input_text: str, auto_accept: bool) -> bool:
+    sys.stdout.write(input_text)
+    if auto_accept:
+        sys.stdout.write("Y\n")
+        return True
+    return input().lower() != "n"
+
+
+def apply_optional_remediation(
+    df: pd.DataFrame, remediation: Remediation, auto_accept: bool
+) -> tuple[pd.DataFrame, bool]:
+    """
+    This function will apply an optional remediation to a dataframe, based on the user input.
+    """
+    optional_applied = False
+    input_text = f"- [Recommended] {remediation.optional_msg} [Y/n]: "
+    if remediation.optional_msg is not None:
+        if accept_suggestion(input_text, auto_accept):
+            assert remediation.optional_fn is not None
+            df = remediation.optional_fn(df)
+            optional_applied = True
+    if remediation.necessary_msg is not None:
+        sys.stdout.write(f"- [Necessary] {remediation.necessary_msg}\n")
+    return df, optional_applied
+
+
+def estimate_fine_tuning_time(df: pd.DataFrame) -> None:
+    """
+    Estimate the time it'll take to fine-tune the dataset
+    """
+    ft_format = infer_task_type(df)
+    expected_time = 1.0
+    if ft_format == "classification":
+        num_examples = len(df)
+        expected_time = num_examples * 1.44
+    else:
+        size = df.memory_usage(index=True).sum()
+        expected_time = size * 0.0515
+
+    def format_time(time: float) -> str:
+        if time < 60:
+            return f"{round(time, 2)} seconds"
+        elif time < 3600:
+            return f"{round(time / 60, 2)} minutes"
+        elif time < 86400:
+            return f"{round(time / 3600, 2)} hours"
+        else:
+            return f"{round(time / 86400, 2)} days"
+
+    time_string = format_time(expected_time + 140)
+    sys.stdout.write(
+        f"Once your model starts training, it'll approximately take {time_string} to train a `curie` model, and less for `ada` and `babbage`. Queue will approximately take half an hour per job ahead of you.\n"
+    )
+
+
+def get_outfnames(fname: str, split: bool) -> list[str]:
+    suffixes = ["_train", "_valid"] if split else [""]
+    i = 0
+    while True:
+        index_suffix = f" ({i})" if i > 0 else ""
+        candidate_fnames = [f"{os.path.splitext(fname)[0]}_prepared{suffix}{index_suffix}.jsonl" for suffix in suffixes]
+        if not any(os.path.isfile(f) for f in candidate_fnames):
+            return candidate_fnames
+        i += 1
+
+
+def get_classification_hyperparams(df: pd.DataFrame) -> tuple[int, object]:
+    n_classes = df.completion.nunique()
+    pos_class = None
+    if n_classes == 2:
+        pos_class = df.completion.value_counts().index[0]
+    return n_classes, pos_class
+
+
+def write_out_file(df: pd.DataFrame, fname: str, any_remediations: bool, auto_accept: bool) -> None:
+    """
+    This function will write out a dataframe to a file, if the user would like to proceed, and also offer a fine-tuning command with the newly created file.
+    For classification it will optionally ask the user if they would like to split the data into train/valid files, and modify the suggested command to include the valid set.
+    """
+    ft_format = infer_task_type(df)
+    common_prompt_suffix = get_common_xfix(df.prompt, xfix="suffix")
+    common_completion_suffix = get_common_xfix(df.completion, xfix="suffix")
+
+    split = False
+    input_text = "- [Recommended] Would you like to split into training and validation set? [Y/n]: "
+    if ft_format == "classification":
+        if accept_suggestion(input_text, auto_accept):
+            split = True
+
+    additional_params = ""
+    common_prompt_suffix_new_line_handled = common_prompt_suffix.replace("\n", "\\n")
+    common_completion_suffix_new_line_handled = common_completion_suffix.replace("\n", "\\n")
+    optional_ending_string = (
+        f' Make sure to include `stop=["{common_completion_suffix_new_line_handled}"]` so that the generated text ends at the expected place.'
+        if len(common_completion_suffix_new_line_handled) > 0
+        else ""
+    )
+
+    input_text = "\n\nYour data will be written to a new JSONL file. Proceed [Y/n]: "
+
+    if not any_remediations and not split:
+        sys.stdout.write(
+            f'\nYou can use your file for fine-tuning:\n> openai api fine_tunes.create -t "{fname}"{additional_params}\n\nAfter you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt.{optional_ending_string}\n'
+        )
+        estimate_fine_tuning_time(df)
+
+    elif accept_suggestion(input_text, auto_accept):
+        fnames = get_outfnames(fname, split)
+        if split:
+            assert len(fnames) == 2 and "train" in fnames[0] and "valid" in fnames[1]
+            MAX_VALID_EXAMPLES = 1000
+            n_train = max(len(df) - MAX_VALID_EXAMPLES, int(len(df) * 0.8))
+            df_train = df.sample(n=n_train, random_state=42)
+            df_valid = df.drop(df_train.index)
+            df_train[["prompt", "completion"]].to_json(  # type: ignore
+                fnames[0], lines=True, orient="records", force_ascii=False, indent=None
+            )
+            df_valid[["prompt", "completion"]].to_json(
+                fnames[1], lines=True, orient="records", force_ascii=False, indent=None
+            )
+
+            n_classes, pos_class = get_classification_hyperparams(df)
+            additional_params += " --compute_classification_metrics"
+            if n_classes == 2:
+                additional_params += f' --classification_positive_class "{pos_class}"'
+            else:
+                additional_params += f" --classification_n_classes {n_classes}"
+        else:
+            assert len(fnames) == 1
+            df[["prompt", "completion"]].to_json(
+                fnames[0], lines=True, orient="records", force_ascii=False, indent=None
+            )
+
+        # Add -v VALID_FILE if we split the file into train / valid
+        files_string = ("s" if split else "") + " to `" + ("` and `".join(fnames))
+        valid_string = f' -v "{fnames[1]}"' if split else ""
+        separator_reminder = (
+            ""
+            if len(common_prompt_suffix_new_line_handled) == 0
+            else f"After you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt."
+        )
+        sys.stdout.write(
+            f'\nWrote modified file{files_string}`\nFeel free to take a look!\n\nNow use that file when fine-tuning:\n> openai api fine_tunes.create -t "{fnames[0]}"{valid_string}{additional_params}\n\n{separator_reminder}{optional_ending_string}\n'
+        )
+        estimate_fine_tuning_time(df)
+    else:
+        sys.stdout.write("Aborting... did not write the file\n")
+
+
+def infer_task_type(df: pd.DataFrame) -> str:
+    """
+    Infer the likely fine-tuning task type from the data
+    """
+    CLASSIFICATION_THRESHOLD = 3  # min_average instances of each class
+    if sum(df.prompt.str.len()) == 0:
+        return "open-ended generation"
+
+    if len(df.completion.unique()) < len(df) / CLASSIFICATION_THRESHOLD:
+        return "classification"
+
+    return "conditional generation"
+
+
+def get_common_xfix(series: Any, xfix: str = "suffix") -> str:
+    """
+    Finds the longest common suffix or prefix of all the values in a series
+    """
+    common_xfix = ""
+    while True:
+        common_xfixes = (
+            series.str[-(len(common_xfix) + 1) :] if xfix == "suffix" else series.str[: len(common_xfix) + 1]
+        )  # first few or last few characters
+        if common_xfixes.nunique() != 1:  # we found the character at which we don't have a unique xfix anymore
+            break
+        elif common_xfix == common_xfixes.values[0]:  # the entire first row is a prefix/suffix of every other row
+            break
+        else:  # the first or last few characters are still common across all rows - let's try to add one more
+            common_xfix = common_xfixes.values[0]
+    return common_xfix
+
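+# Illustrative behaviour of get_common_xfix (a sketch, not part of the original
+# module; the Series values are hypothetical):
+#
+#     get_common_xfix(pd.Series(["ID-1", "ID-2", "ID-3"]), xfix="prefix")  # -> "ID-"
+#     get_common_xfix(pd.Series(["abc ##", "xyz ##"]), xfix="suffix")      # -> " ##"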
+
+Validator: TypeAlias = "Callable[[pd.DataFrame], Remediation | None]"
+
+
+def get_validators() -> list[Validator]:
+    return [
+        num_examples_validator,
+        lambda x: necessary_column_validator(x, "prompt"),
+        lambda x: necessary_column_validator(x, "completion"),
+        additional_column_validator,
+        non_empty_field_validator,
+        format_inferrer_validator,
+        duplicated_rows_validator,
+        long_examples_validator,
+        lambda x: lower_case_validator(x, "prompt"),
+        lambda x: lower_case_validator(x, "completion"),
+        common_prompt_suffix_validator,
+        common_prompt_prefix_validator,
+        common_completion_prefix_validator,
+        common_completion_suffix_validator,
+        completions_space_start_validator,
+    ]
+
+
+def apply_validators(
+    df: pd.DataFrame,
+    fname: str,
+    remediation: Remediation | None,
+    validators: list[Validator],
+    auto_accept: bool,
+    write_out_file_func: Callable[..., Any],
+) -> None:
+    optional_remediations: list[Remediation] = []
+    if remediation is not None:
+        optional_remediations.append(remediation)
+    for validator in validators:
+        remediation = validator(df)
+        if remediation is not None:
+            optional_remediations.append(remediation)
+            df = apply_necessary_remediation(df, remediation)
+
+    any_optional_or_necessary_remediations = any(
+        remediation.optional_msg is not None or remediation.necessary_msg is not None
+        for remediation in optional_remediations
+    )
+    any_necessary_applied = any(
+        remediation.necessary_msg is not None for remediation in optional_remediations
+    )
+    any_optional_applied = False
+
+    if any_optional_or_necessary_remediations:
+        sys.stdout.write("\n\nBased on the analysis we will perform the following actions:\n")
+        for remediation in optional_remediations:
+            df, optional_applied = apply_optional_remediation(df, remediation, auto_accept)
+            any_optional_applied = any_optional_applied or optional_applied
+    else:
+        sys.stdout.write("\n\nNo remediations found.\n")
+
+    any_optional_or_necessary_applied = any_optional_applied or any_necessary_applied
+
+    write_out_file_func(df, fname, any_optional_or_necessary_applied, auto_accept)
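+
+
+# A minimal wiring sketch for the helpers above (illustrative only; `df` and
+# `fname` are assumed to be the already-loaded DataFrame and its path, and
+# `write_out_file` is assumed to be the writer defined earlier in this file):
+#
+#     apply_validators(
+#         df,
+#         fname,
+#         remediation=None,
+#         validators=get_validators(),
+#         auto_accept=False,
+#         write_out_file_func=write_out_file,
+#     )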
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/azure.py b/.venv/lib/python3.12/site-packages/openai/lib/azure.py
new file mode 100644
index 00000000..ea7bd20d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/azure.py
@@ -0,0 +1,632 @@
+from __future__ import annotations
+
+import os
+import inspect
+from typing import Any, Union, Mapping, TypeVar, Callable, Awaitable, cast, overload
+from typing_extensions import Self, override
+
+import httpx
+
+from .._types import NOT_GIVEN, Omit, Query, Timeout, NotGiven
+from .._utils import is_given, is_mapping
+from .._client import OpenAI, AsyncOpenAI
+from .._compat import model_copy
+from .._models import FinalRequestOptions
+from .._streaming import Stream, AsyncStream
+from .._exceptions import OpenAIError
+from .._base_client import DEFAULT_MAX_RETRIES, BaseClient
+
+_deployments_endpoints = set(
+    [
+        "/completions",
+        "/chat/completions",
+        "/embeddings",
+        "/audio/transcriptions",
+        "/audio/translations",
+        "/audio/speech",
+        "/images/generations",
+    ]
+)
+
+
+AzureADTokenProvider = Callable[[], str]
+AsyncAzureADTokenProvider = Callable[[], "str | Awaitable[str]"]
+_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient])
+_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]])
+
+
+# we need to use a sentinel API key value for Azure AD
+# as we don't want to make the `api_key` in the main client Optional
+# and Azure AD tokens may be retrieved on a per-request basis
+API_KEY_SENTINEL = "".join(["<", "missing API key", ">"])
+
+
+class MutuallyExclusiveAuthError(OpenAIError):
+    def __init__(self) -> None:
+        super().__init__(
+            "The `api_key`, `azure_ad_token` and `azure_ad_token_provider` arguments are mutually exclusive; only one can be passed at a time"
+        )
+
+
+class BaseAzureClient(BaseClient[_HttpxClientT, _DefaultStreamT]):
+    _azure_endpoint: httpx.URL | None
+    _azure_deployment: str | None
+
+    @override
+    def _build_request(
+        self,
+        options: FinalRequestOptions,
+        *,
+        retries_taken: int = 0,
+    ) -> httpx.Request:
+        if options.url in _deployments_endpoints and is_mapping(options.json_data):
+            model = options.json_data.get("model")
+            if model is not None and "/deployments" not in str(self.base_url.path):
+                options.url = f"/deployments/{model}{options.url}"
+
+        return super()._build_request(options, retries_taken=retries_taken)
+
+    @override
+    def _prepare_url(self, url: str) -> httpx.URL:
+        """Adjust the URL if the client was configured with an Azure endpoint + deployment
+        and the API feature being called is **not** a deployments-based endpoint
+        (i.e. one that requires /deployments/deployment-name in the URL path).
+        """
+        if self._azure_deployment and self._azure_endpoint and url not in _deployments_endpoints:
+            merge_url = httpx.URL(url)
+            if merge_url.is_relative_url:
+                merge_raw_path = (
+                    self._azure_endpoint.raw_path.rstrip(b"/") + b"/openai/" + merge_url.raw_path.lstrip(b"/")
+                )
+                return self._azure_endpoint.copy_with(raw_path=merge_raw_path)
+
+            return merge_url
+
+        return super()._prepare_url(url)
+
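+# Illustrative effect of the BaseAzureClient overrides above (a sketch; the
+# endpoint and deployment names are hypothetical placeholders):
+#
+#   * with azure_endpoint="https://example-resource.openai.azure.com" and
+#     azure_deployment="my-deployment", a non-deployments call such as
+#     client.models.list() is routed to
+#     https://example-resource.openai.azure.com/openai/models
+#   * a deployments call such as client.chat.completions.create(...) keeps the
+#     /openai/deployments/my-deployment base URL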
+
+class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI):
+    @overload
+    def __init__(
+        self,
+        *,
+        azure_endpoint: str,
+        azure_deployment: str | None = None,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AzureADTokenProvider | None = None,
+        organization: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.Client | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        azure_deployment: str | None = None,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AzureADTokenProvider | None = None,
+        organization: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.Client | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        base_url: str,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AzureADTokenProvider | None = None,
+        organization: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.Client | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        api_version: str | None = None,
+        azure_endpoint: str | None = None,
+        azure_deployment: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AzureADTokenProvider | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        base_url: str | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.Client | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None:
+        """Construct a new synchronous Azure OpenAI client instance.
+
+        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+        - `api_key` from `AZURE_OPENAI_API_KEY`
+        - `organization` from `OPENAI_ORG_ID`
+        - `project` from `OPENAI_PROJECT_ID`
+        - `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN`
+        - `api_version` from `OPENAI_API_VERSION`
+        - `azure_endpoint` from `AZURE_OPENAI_ENDPOINT`
+
+        Args:
+            azure_endpoint: Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/`
+
+            azure_ad_token: Your Azure Active Directory token, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id
+
+            azure_ad_token_provider: A function that returns an Azure Active Directory token, which will be invoked on every request.
+
+            azure_deployment: A model deployment. If given together with `azure_endpoint`, the base client URL will include `/deployments/{azure_deployment}`.
+                Not supported with Assistants APIs.
+        """
+        if api_key is None:
+            api_key = os.environ.get("AZURE_OPENAI_API_KEY")
+
+        if azure_ad_token is None:
+            azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN")
+
+        if api_key is None and azure_ad_token is None and azure_ad_token_provider is None:
+            raise OpenAIError(
+                "Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables."
+            )
+
+        if api_version is None:
+            api_version = os.environ.get("OPENAI_API_VERSION")
+
+        if api_version is None:
+            raise ValueError(
+                "Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable"
+            )
+
+        if default_query is None:
+            default_query = {"api-version": api_version}
+        else:
+            default_query = {**default_query, "api-version": api_version}
+
+        if base_url is None:
+            if azure_endpoint is None:
+                azure_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT")
+
+            if azure_endpoint is None:
+                raise ValueError(
+                    "Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable"
+                )
+
+            if azure_deployment is not None:
+                base_url = f"{azure_endpoint.rstrip('/')}/openai/deployments/{azure_deployment}"
+            else:
+                base_url = f"{azure_endpoint.rstrip('/')}/openai"
+        else:
+            if azure_endpoint is not None:
+                raise ValueError("base_url and azure_endpoint are mutually exclusive")
+
+        if api_key is None:
+            # define a sentinel value to avoid any typing issues
+            api_key = API_KEY_SENTINEL
+
+        super().__init__(
+            api_key=api_key,
+            organization=organization,
+            project=project,
+            base_url=base_url,
+            timeout=timeout,
+            max_retries=max_retries,
+            default_headers=default_headers,
+            default_query=default_query,
+            http_client=http_client,
+            websocket_base_url=websocket_base_url,
+            _strict_response_validation=_strict_response_validation,
+        )
+        self._api_version = api_version
+        self._azure_ad_token = azure_ad_token
+        self._azure_ad_token_provider = azure_ad_token_provider
+        self._azure_deployment = azure_deployment if azure_endpoint else None
+        self._azure_endpoint = httpx.URL(azure_endpoint) if azure_endpoint else None
+
+    @override
+    def copy(
+        self,
+        *,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        api_version: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AzureADTokenProvider | None = None,
+        base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        http_client: httpx.Client | None = None,
+        max_retries: int | NotGiven = NOT_GIVEN,
+        default_headers: Mapping[str, str] | None = None,
+        set_default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        set_default_query: Mapping[str, object] | None = None,
+        _extra_kwargs: Mapping[str, Any] = {},
+    ) -> Self:
+        """
+        Create a new client instance re-using the same options given to the current client with optional overriding.
+        """
+        return super().copy(
+            api_key=api_key,
+            organization=organization,
+            project=project,
+            websocket_base_url=websocket_base_url,
+            base_url=base_url,
+            timeout=timeout,
+            http_client=http_client,
+            max_retries=max_retries,
+            default_headers=default_headers,
+            set_default_headers=set_default_headers,
+            default_query=default_query,
+            set_default_query=set_default_query,
+            _extra_kwargs={
+                "api_version": api_version or self._api_version,
+                "azure_ad_token": azure_ad_token or self._azure_ad_token,
+                "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider,
+                **_extra_kwargs,
+            },
+        )
+
+    with_options = copy
+
+    def _get_azure_ad_token(self) -> str | None:
+        if self._azure_ad_token is not None:
+            return self._azure_ad_token
+
+        provider = self._azure_ad_token_provider
+        if provider is not None:
+            token = provider()
+            if not token or not isinstance(token, str):  # pyright: ignore[reportUnnecessaryIsInstance]
+                raise ValueError(
+                    f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}",
+                )
+            return token
+
+        return None
+
+    @override
+    def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
+        headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {}
+
+        options = model_copy(options)
+        options.headers = headers
+
+        azure_ad_token = self._get_azure_ad_token()
+        if azure_ad_token is not None:
+            if headers.get("Authorization") is None:
+                headers["Authorization"] = f"Bearer {azure_ad_token}"
+        elif self.api_key is not API_KEY_SENTINEL:
+            if headers.get("api-key") is None:
+                headers["api-key"] = self.api_key
+        else:
+            # should never be hit
+            raise ValueError("Unable to handle auth")
+
+        return options
+
+    def _configure_realtime(self, model: str, extra_query: Query) -> tuple[httpx.URL, dict[str, str]]:
+        auth_headers = {}
+        query = {
+            **extra_query,
+            "api-version": self._api_version,
+            "deployment": self._azure_deployment or model,
+        }
+        if self.api_key != "<missing API key>":
+            auth_headers = {"api-key": self.api_key}
+        else:
+            token = self._get_azure_ad_token()
+            if token:
+                auth_headers = {"Authorization": f"Bearer {token}"}
+
+        if self.websocket_base_url is not None:
+            base_url = httpx.URL(self.websocket_base_url)
+            merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
+            realtime_url = base_url.copy_with(raw_path=merge_raw_path)
+        else:
+            base_url = self._prepare_url("/realtime")
+            realtime_url = base_url.copy_with(scheme="wss")
+
+        url = realtime_url.copy_with(params={**query})
+        return url, auth_headers
+
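+# A minimal construction sketch for the synchronous client above (illustrative
+# only; the endpoint, deployment and API version are hypothetical placeholders):
+#
+#     client = AzureOpenAI(
+#         azure_endpoint="https://example-resource.openai.azure.com",
+#         azure_deployment="my-deployment",
+#         api_version="2024-02-01",
+#         api_key=os.environ["AZURE_OPENAI_API_KEY"],
+#     )
+#     completion = client.chat.completions.create(
+#         model="my-deployment",
+#         messages=[{"role": "user", "content": "Hello"}],
+#     )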
+
+class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], AsyncOpenAI):
+    @overload
+    def __init__(
+        self,
+        *,
+        azure_endpoint: str,
+        azure_deployment: str | None = None,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.AsyncClient | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        azure_deployment: str | None = None,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.AsyncClient | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        base_url: str,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.AsyncClient | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        azure_endpoint: str | None = None,
+        azure_deployment: str | None = None,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        base_url: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        max_retries: int = DEFAULT_MAX_RETRIES,
+        default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        http_client: httpx.AsyncClient | None = None,
+        _strict_response_validation: bool = False,
+    ) -> None:
+        """Construct a new asynchronous Azure OpenAI client instance.
+
+        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+        - `api_key` from `AZURE_OPENAI_API_KEY`
+        - `organization` from `OPENAI_ORG_ID`
+        - `project` from `OPENAI_PROJECT_ID`
+        - `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN`
+        - `api_version` from `OPENAI_API_VERSION`
+        - `azure_endpoint` from `AZURE_OPENAI_ENDPOINT`
+
+        Args:
+            azure_endpoint: Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/`
+
+            azure_ad_token: Your Azure Active Directory token, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id
+
+            azure_ad_token_provider: A function that returns an Azure Active Directory token, which will be invoked on every request.
+
+            azure_deployment: A model deployment. If given together with `azure_endpoint`, the base client URL will include `/deployments/{azure_deployment}`.
+                Not supported with Assistants APIs.
+        """
+        if api_key is None:
+            api_key = os.environ.get("AZURE_OPENAI_API_KEY")
+
+        if azure_ad_token is None:
+            azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN")
+
+        if api_key is None and azure_ad_token is None and azure_ad_token_provider is None:
+            raise OpenAIError(
+                "Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables."
+            )
+
+        if api_version is None:
+            api_version = os.environ.get("OPENAI_API_VERSION")
+
+        if api_version is None:
+            raise ValueError(
+                "Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable"
+            )
+
+        if default_query is None:
+            default_query = {"api-version": api_version}
+        else:
+            default_query = {**default_query, "api-version": api_version}
+
+        if base_url is None:
+            if azure_endpoint is None:
+                azure_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT")
+
+            if azure_endpoint is None:
+                raise ValueError(
+                    "Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable"
+                )
+
+            if azure_deployment is not None:
+                base_url = f"{azure_endpoint.rstrip('/')}/openai/deployments/{azure_deployment}"
+            else:
+                base_url = f"{azure_endpoint.rstrip('/')}/openai"
+        else:
+            if azure_endpoint is not None:
+                raise ValueError("base_url and azure_endpoint are mutually exclusive")
+
+        if api_key is None:
+            # define a sentinel value to avoid any typing issues
+            api_key = API_KEY_SENTINEL
+
+        super().__init__(
+            api_key=api_key,
+            organization=organization,
+            project=project,
+            base_url=base_url,
+            timeout=timeout,
+            max_retries=max_retries,
+            default_headers=default_headers,
+            default_query=default_query,
+            http_client=http_client,
+            websocket_base_url=websocket_base_url,
+            _strict_response_validation=_strict_response_validation,
+        )
+        self._api_version = api_version
+        self._azure_ad_token = azure_ad_token
+        self._azure_ad_token_provider = azure_ad_token_provider
+        self._azure_deployment = azure_deployment if azure_endpoint else None
+        self._azure_endpoint = httpx.URL(azure_endpoint) if azure_endpoint else None
+
+    @override
+    def copy(
+        self,
+        *,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        websocket_base_url: str | httpx.URL | None = None,
+        api_version: str | None = None,
+        azure_ad_token: str | None = None,
+        azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
+        base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+        http_client: httpx.AsyncClient | None = None,
+        max_retries: int | NotGiven = NOT_GIVEN,
+        default_headers: Mapping[str, str] | None = None,
+        set_default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        set_default_query: Mapping[str, object] | None = None,
+        _extra_kwargs: Mapping[str, Any] = {},
+    ) -> Self:
+        """
+        Create a new client instance re-using the same options given to the current client with optional overriding.
+        """
+        return super().copy(
+            api_key=api_key,
+            organization=organization,
+            project=project,
+            websocket_base_url=websocket_base_url,
+            base_url=base_url,
+            timeout=timeout,
+            http_client=http_client,
+            max_retries=max_retries,
+            default_headers=default_headers,
+            set_default_headers=set_default_headers,
+            default_query=default_query,
+            set_default_query=set_default_query,
+            _extra_kwargs={
+                "api_version": api_version or self._api_version,
+                "azure_ad_token": azure_ad_token or self._azure_ad_token,
+                "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider,
+                **_extra_kwargs,
+            },
+        )
+
+    with_options = copy
+
+    async def _get_azure_ad_token(self) -> str | None:
+        if self._azure_ad_token is not None:
+            return self._azure_ad_token
+
+        provider = self._azure_ad_token_provider
+        if provider is not None:
+            token = provider()
+            if inspect.isawaitable(token):
+                token = await token
+            if not token or not isinstance(cast(Any, token), str):
+                raise ValueError(
+                    f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}",
+                )
+            return str(token)
+
+        return None
+
+    @override
+    async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
+        headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {}
+
+        options = model_copy(options)
+        options.headers = headers
+
+        azure_ad_token = await self._get_azure_ad_token()
+        if azure_ad_token is not None:
+            if headers.get("Authorization") is None:
+                headers["Authorization"] = f"Bearer {azure_ad_token}"
+        elif self.api_key is not API_KEY_SENTINEL:
+            if headers.get("api-key") is None:
+                headers["api-key"] = self.api_key
+        else:
+            # should never be hit
+            raise ValueError("Unable to handle auth")
+
+        return options
+
+    async def _configure_realtime(self, model: str, extra_query: Query) -> tuple[httpx.URL, dict[str, str]]:
+        auth_headers = {}
+        query = {
+            **extra_query,
+            "api-version": self._api_version,
+            "deployment": self._azure_deployment or model,
+        }
+        if self.api_key != "<missing API key>":
+            auth_headers = {"api-key": self.api_key}
+        else:
+            token = await self._get_azure_ad_token()
+            if token:
+                auth_headers = {"Authorization": f"Bearer {token}"}
+
+        if self.websocket_base_url is not None:
+            base_url = httpx.URL(self.websocket_base_url)
+            merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
+            realtime_url = base_url.copy_with(raw_path=merge_raw_path)
+        else:
+            base_url = self._prepare_url("/realtime")
+            realtime_url = base_url.copy_with(scheme="wss")
+
+        url = realtime_url.copy_with(params={**query})
+        return url, auth_headers
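+
+
+# A minimal async sketch that authenticates with an Azure AD token provider
+# instead of an API key (illustrative only; assumes the optional
+# `azure-identity` package is installed):
+#
+#     from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+#
+#     token_provider = get_bearer_token_provider(
+#         DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+#     )
+#     client = AsyncAzureOpenAI(
+#         azure_endpoint="https://example-resource.openai.azure.com",
+#         api_version="2024-02-01",
+#         azure_ad_token_provider=token_provider,
+#     )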
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/__init__.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/__init__.py
new file mode 100644
index 00000000..eb378d25
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/__init__.py
@@ -0,0 +1,8 @@
+from ._assistants import (
+    AssistantEventHandler as AssistantEventHandler,
+    AssistantEventHandlerT as AssistantEventHandlerT,
+    AssistantStreamManager as AssistantStreamManager,
+    AsyncAssistantEventHandler as AsyncAssistantEventHandler,
+    AsyncAssistantEventHandlerT as AsyncAssistantEventHandlerT,
+    AsyncAssistantStreamManager as AsyncAssistantStreamManager,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/_assistants.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/_assistants.py
new file mode 100644
index 00000000..6efb3ca3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/_assistants.py
@@ -0,0 +1,1038 @@
+from __future__ import annotations
+
+import asyncio
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Callable, Iterable, Iterator, cast
+from typing_extensions import Awaitable, AsyncIterable, AsyncIterator, assert_never
+
+import httpx
+
+from ..._utils import is_dict, is_list, consume_sync_iterator, consume_async_iterator
+from ..._compat import model_dump
+from ..._models import construct_type
+from ..._streaming import Stream, AsyncStream
+from ...types.beta import AssistantStreamEvent
+from ...types.beta.threads import (
+    Run,
+    Text,
+    Message,
+    ImageFile,
+    TextDelta,
+    MessageDelta,
+    MessageContent,
+    MessageContentDelta,
+)
+from ...types.beta.threads.runs import RunStep, ToolCall, RunStepDelta, ToolCallDelta
+
+
+class AssistantEventHandler:
+    text_deltas: Iterable[str]
+    """Iterator over just the text deltas in the stream.
+
+    This corresponds to the `thread.message.delta` event
+    in the API.
+
+    ```py
+    for text in stream.text_deltas:
+        print(text, end="", flush=True)
+    print()
+    ```
+    """
+
+    def __init__(self) -> None:
+        self._current_event: AssistantStreamEvent | None = None
+        self._current_message_content_index: int | None = None
+        self._current_message_content: MessageContent | None = None
+        self._current_tool_call_index: int | None = None
+        self._current_tool_call: ToolCall | None = None
+        self.__current_run_step_id: str | None = None
+        self.__current_run: Run | None = None
+        self.__run_step_snapshots: dict[str, RunStep] = {}
+        self.__message_snapshots: dict[str, Message] = {}
+        self.__current_message_snapshot: Message | None = None
+
+        self.text_deltas = self.__text_deltas__()
+        self._iterator = self.__stream__()
+        self.__stream: Stream[AssistantStreamEvent] | None = None
+
+    def _init(self, stream: Stream[AssistantStreamEvent]) -> None:
+        if self.__stream:
+            raise RuntimeError(
+                "A single event handler cannot be shared between multiple streams; you will need to construct a new event handler instance"
+            )
+
+        self.__stream = stream
+
+    def __next__(self) -> AssistantStreamEvent:
+        return self._iterator.__next__()
+
+    def __iter__(self) -> Iterator[AssistantStreamEvent]:
+        for item in self._iterator:
+            yield item
+
+    @property
+    def current_event(self) -> AssistantStreamEvent | None:
+        return self._current_event
+
+    @property
+    def current_run(self) -> Run | None:
+        return self.__current_run
+
+    @property
+    def current_run_step_snapshot(self) -> RunStep | None:
+        if not self.__current_run_step_id:
+            return None
+
+        return self.__run_step_snapshots[self.__current_run_step_id]
+
+    @property
+    def current_message_snapshot(self) -> Message | None:
+        return self.__current_message_snapshot
+
+    def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called when the context manager exits.
+        """
+        if self.__stream:
+            self.__stream.close()
+
+    def until_done(self) -> None:
+        """Waits until the stream has been consumed"""
+        consume_sync_iterator(self)
+
+    def get_final_run(self) -> Run:
+        """Wait for the stream to finish and return the completed Run object"""
+        self.until_done()
+
+        if not self.__current_run:
+            raise RuntimeError("No final run object found")
+
+        return self.__current_run
+
+    def get_final_run_steps(self) -> list[RunStep]:
+        """Wait for the stream to finish and return the steps taken in this run"""
+        self.until_done()
+
+        if not self.__run_step_snapshots:
+            raise RuntimeError("No run steps found")
+
+        return [step for step in self.__run_step_snapshots.values()]
+
+    def get_final_messages(self) -> list[Message]:
+        """Wait for the stream to finish and return the messages emitted in this run"""
+        self.until_done()
+
+        if not self.__message_snapshots:
+            raise RuntimeError("No messages found")
+
+        return [message for message in self.__message_snapshots.values()]
+
+    def __text_deltas__(self) -> Iterator[str]:
+        for event in self:
+            if event.event != "thread.message.delta":
+                continue
+
+            for content_delta in event.data.delta.content or []:
+                if content_delta.type == "text" and content_delta.text and content_delta.text.value:
+                    yield content_delta.text.value
+
+    # event handlers
+
+    def on_end(self) -> None:
+        """Fires when the stream has finished.
+
+        This happens if the stream is read to completion
+        or if an exception occurs during iteration.
+        """
+
+    def on_event(self, event: AssistantStreamEvent) -> None:
+        """Callback that is fired for every Server-Sent-Event"""
+
+    def on_run_step_created(self, run_step: RunStep) -> None:
+        """Callback that is fired when a run step is created"""
+
+    def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:
+        """Callback that is fired whenever a run step delta is returned from the API
+
+        The first argument is just the delta as sent by the API and the second argument
+        is the accumulated snapshot of the run step. For example, a tool calls event may
+        look like this:
+
+        # delta
+        tool_calls=[
+            RunStepDeltaToolCallsCodeInterpreter(
+                index=0,
+                type='code_interpreter',
+                id=None,
+                code_interpreter=CodeInterpreter(input=' sympy', outputs=None)
+            )
+        ]
+        # snapshot
+        tool_calls=[
+            CodeToolCall(
+                id='call_wKayJlcYV12NiadiZuJXxcfx',
+                code_interpreter=CodeInterpreter(input='from sympy', outputs=[]),
+                type='code_interpreter',
+                index=0
+            )
+        ],
+        """
+
+    def on_run_step_done(self, run_step: RunStep) -> None:
+        """Callback that is fired when a run step is completed"""
+
+    def on_tool_call_created(self, tool_call: ToolCall) -> None:
+        """Callback that is fired when a tool call is created"""
+
+    def on_tool_call_delta(self, delta: ToolCallDelta, snapshot: ToolCall) -> None:
+        """Callback that is fired when a tool call delta is encountered"""
+
+    def on_tool_call_done(self, tool_call: ToolCall) -> None:
+        """Callback that is fired when a tool call is completed"""
+
+    def on_exception(self, exception: Exception) -> None:
+        """Fired whenever an exception happens during streaming"""
+
+    def on_timeout(self) -> None:
+        """Fires if the request times out"""
+
+    def on_message_created(self, message: Message) -> None:
+        """Callback that is fired when a message is created"""
+
+    def on_message_delta(self, delta: MessageDelta, snapshot: Message) -> None:
+        """Callback that is fired whenever a message delta is returned from the API
+
+        The first argument is just the delta as sent by the API and the second argument
+        is the accumulated snapshot of the message. For example, a text content event may
+        look like this:
+
+        # delta
+        MessageDeltaText(
+            index=0,
+            type='text',
+            text=Text(
+                value=' Jane'
+            ),
+        )
+        # snapshot
+        MessageContentText(
+            index=0,
+            type='text',
+            text=Text(
+                value='Certainly, Jane'
+            ),
+        )
+        """
+
+    def on_message_done(self, message: Message) -> None:
+        """Callback that is fired when a message is completed"""
+
+    def on_text_created(self, text: Text) -> None:
+        """Callback that is fired when a text content block is created"""
+
+    def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
+        """Callback that is fired whenever a text content delta is returned
+        by the API.
+
+        The first argument is just the delta as sent by the API and the second argument
+        is the accumulated snapshot of the text. For example:
+
+        on_text_delta(TextDelta(value="The"), Text(value="The")),
+        on_text_delta(TextDelta(value=" solution"), Text(value="The solution")),
+        on_text_delta(TextDelta(value=" to"), Text(value="The solution to")),
+        on_text_delta(TextDelta(value=" the"), Text(value="The solution to the")),
+        on_text_delta(TextDelta(value=" equation"), Text(value="The solution to the equation")),
+        """
+
+    def on_text_done(self, text: Text) -> None:
+        """Callback that is fired when a text content block is finished"""
+
+    def on_image_file_done(self, image_file: ImageFile) -> None:
+        """Callback that is fired when an image file block is finished"""
+
+    def _emit_sse_event(self, event: AssistantStreamEvent) -> None:
+        self._current_event = event
+        self.on_event(event)
+
+        self.__current_message_snapshot, new_content = accumulate_event(
+            event=event,
+            current_message_snapshot=self.__current_message_snapshot,
+        )
+        if self.__current_message_snapshot is not None:
+            self.__message_snapshots[self.__current_message_snapshot.id] = self.__current_message_snapshot
+
+        accumulate_run_step(
+            event=event,
+            run_step_snapshots=self.__run_step_snapshots,
+        )
+
+        for content_delta in new_content:
+            assert self.__current_message_snapshot is not None
+
+            block = self.__current_message_snapshot.content[content_delta.index]
+            if block.type == "text":
+                self.on_text_created(block.text)
+
+        if (
+            event.event == "thread.run.completed"
+            or event.event == "thread.run.cancelled"
+            or event.event == "thread.run.expired"
+            or event.event == "thread.run.failed"
+            or event.event == "thread.run.requires_action"
+            or event.event == "thread.run.incomplete"
+        ):
+            self.__current_run = event.data
+            if self._current_tool_call:
+                self.on_tool_call_done(self._current_tool_call)
+        elif (
+            event.event == "thread.run.created"
+            or event.event == "thread.run.in_progress"
+            or event.event == "thread.run.cancelling"
+            or event.event == "thread.run.queued"
+        ):
+            self.__current_run = event.data
+        elif event.event == "thread.message.created":
+            self.on_message_created(event.data)
+        elif event.event == "thread.message.delta":
+            snapshot = self.__current_message_snapshot
+            assert snapshot is not None
+
+            message_delta = event.data.delta
+            if message_delta.content is not None:
+                for content_delta in message_delta.content:
+                    if content_delta.type == "text" and content_delta.text:
+                        snapshot_content = snapshot.content[content_delta.index]
+                        assert snapshot_content.type == "text"
+                        self.on_text_delta(content_delta.text, snapshot_content.text)
+
+                    # If the delta is for a new message content:
+                    # - emit on_text_done/on_image_file_done for the previous message content
+                    # - emit on_text_created/on_image_created for the new message content
+                    if content_delta.index != self._current_message_content_index:
+                        if self._current_message_content is not None:
+                            if self._current_message_content.type == "text":
+                                self.on_text_done(self._current_message_content.text)
+                            elif self._current_message_content.type == "image_file":
+                                self.on_image_file_done(self._current_message_content.image_file)
+
+                        self._current_message_content_index = content_delta.index
+                        self._current_message_content = snapshot.content[content_delta.index]
+
+                    # Update the current_message_content (delta event is correctly emitted already)
+                    self._current_message_content = snapshot.content[content_delta.index]
+
+            self.on_message_delta(event.data.delta, snapshot)
+        elif event.event == "thread.message.completed" or event.event == "thread.message.incomplete":
+            self.__current_message_snapshot = event.data
+            self.__message_snapshots[event.data.id] = event.data
+
+            if self._current_message_content_index is not None:
+                content = event.data.content[self._current_message_content_index]
+                if content.type == "text":
+                    self.on_text_done(content.text)
+                elif content.type == "image_file":
+                    self.on_image_file_done(content.image_file)
+
+            self.on_message_done(event.data)
+        elif event.event == "thread.run.step.created":
+            self.__current_run_step_id = event.data.id
+            self.on_run_step_created(event.data)
+        elif event.event == "thread.run.step.in_progress":
+            self.__current_run_step_id = event.data.id
+        elif event.event == "thread.run.step.delta":
+            step_snapshot = self.__run_step_snapshots[event.data.id]
+
+            run_step_delta = event.data.delta
+            if (
+                run_step_delta.step_details
+                and run_step_delta.step_details.type == "tool_calls"
+                and run_step_delta.step_details.tool_calls is not None
+            ):
+                assert step_snapshot.step_details.type == "tool_calls"
+                for tool_call_delta in run_step_delta.step_details.tool_calls:
+                    if tool_call_delta.index == self._current_tool_call_index:
+                        self.on_tool_call_delta(
+                            tool_call_delta,
+                            step_snapshot.step_details.tool_calls[tool_call_delta.index],
+                        )
+
+                    # If the delta is for a new tool call:
+                    # - emit on_tool_call_done for the previous tool_call
+                    # - emit on_tool_call_created for the new tool_call
+                    if tool_call_delta.index != self._current_tool_call_index:
+                        if self._current_tool_call is not None:
+                            self.on_tool_call_done(self._current_tool_call)
+
+                        self._current_tool_call_index = tool_call_delta.index
+                        self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index]
+                        self.on_tool_call_created(self._current_tool_call)
+
+                    # Update the current_tool_call (delta event is correctly emitted already)
+                    self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index]
+
+            self.on_run_step_delta(
+                event.data.delta,
+                step_snapshot,
+            )
+        elif (
+            event.event == "thread.run.step.completed"
+            or event.event == "thread.run.step.cancelled"
+            or event.event == "thread.run.step.expired"
+            or event.event == "thread.run.step.failed"
+        ):
+            if self._current_tool_call:
+                self.on_tool_call_done(self._current_tool_call)
+
+            self.on_run_step_done(event.data)
+            self.__current_run_step_id = None
+        elif event.event == "thread.created" or event.event == "thread.message.in_progress" or event.event == "error":
+            # currently no special handling
+            ...
+        else:
+            # we only want to error at build-time
+            if TYPE_CHECKING:  # type: ignore[unreachable]
+                assert_never(event)
+
+        self._current_event = None
+
+    def __stream__(self) -> Iterator[AssistantStreamEvent]:
+        stream = self.__stream
+        if not stream:
+            raise RuntimeError("Stream has not been started yet")
+
+        try:
+            for event in stream:
+                self._emit_sse_event(event)
+
+                yield event
+        except (httpx.TimeoutException, asyncio.TimeoutError) as exc:
+            self.on_timeout()
+            self.on_exception(exc)
+            raise
+        except Exception as exc:
+            self.on_exception(exc)
+            raise
+        finally:
+            self.on_end()
+
+
+AssistantEventHandlerT = TypeVar("AssistantEventHandlerT", bound=AssistantEventHandler)
+
+
+class AssistantStreamManager(Generic[AssistantEventHandlerT]):
+    """Wrapper over AssistantEventHandler that is returned by `.stream()`
+    so that a context manager can be used.
+
+    ```py
+    with client.threads.create_and_run_stream(...) as stream:
+        for event in stream:
+            ...
+    ```
+    """
+
+    def __init__(
+        self,
+        api_request: Callable[[], Stream[AssistantStreamEvent]],
+        *,
+        event_handler: AssistantEventHandlerT,
+    ) -> None:
+        self.__stream: Stream[AssistantStreamEvent] | None = None
+        self.__event_handler = event_handler
+        self.__api_request = api_request
+
+    def __enter__(self) -> AssistantEventHandlerT:
+        self.__stream = self.__api_request()
+        self.__event_handler._init(self.__stream)
+        return self.__event_handler
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            self.__stream.close()
+
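+# A minimal subclassing sketch for the synchronous handler above (illustrative
+# only; `MyHandler` is a hypothetical name and the `.stream(...)` call is
+# assumed to be one of the beta Assistants streaming methods that accepts an
+# `event_handler` argument):
+#
+#     class MyHandler(AssistantEventHandler):
+#         def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
+#             print(delta.value, end="", flush=True)
+#
+#     with client.beta.threads.runs.stream(
+#         thread_id=thread.id,
+#         assistant_id=assistant.id,
+#         event_handler=MyHandler(),
+#     ) as stream:
+#         stream.until_done()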
+
+class AsyncAssistantEventHandler:
+    text_deltas: AsyncIterable[str]
+    """Iterator over just the text deltas in the stream.
+
+    This corresponds to the `thread.message.delta` event
+    in the API.
+
+    ```py
+    async for text in stream.text_deltas:
+        print(text, end="", flush=True)
+    print()
+    ```
+    """
+
+    def __init__(self) -> None:
+        self._current_event: AssistantStreamEvent | None = None
+        self._current_message_content_index: int | None = None
+        self._current_message_content: MessageContent | None = None
+        self._current_tool_call_index: int | None = None
+        self._current_tool_call: ToolCall | None = None
+        self.__current_run_step_id: str | None = None
+        self.__current_run: Run | None = None
+        self.__run_step_snapshots: dict[str, RunStep] = {}
+        self.__message_snapshots: dict[str, Message] = {}
+        self.__current_message_snapshot: Message | None = None
+
+        self.text_deltas = self.__text_deltas__()
+        self._iterator = self.__stream__()
+        self.__stream: AsyncStream[AssistantStreamEvent] | None = None
+
+    def _init(self, stream: AsyncStream[AssistantStreamEvent]) -> None:
+        if self.__stream:
+            raise RuntimeError(
+                "A single event handler cannot be shared between multiple streams; you will need to construct a new event handler instance"
+            )
+
+        self.__stream = stream
+
+    async def __anext__(self) -> AssistantStreamEvent:
+        return await self._iterator.__anext__()
+
+    async def __aiter__(self) -> AsyncIterator[AssistantStreamEvent]:
+        async for item in self._iterator:
+            yield item
+
+    async def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called when the context manager exits.
+        """
+        if self.__stream:
+            await self.__stream.close()
+
+    @property
+    def current_event(self) -> AssistantStreamEvent | None:
+        return self._current_event
+
+    @property
+    def current_run(self) -> Run | None:
+        return self.__current_run
+
+    @property
+    def current_run_step_snapshot(self) -> RunStep | None:
+        if not self.__current_run_step_id:
+            return None
+
+        return self.__run_step_snapshots[self.__current_run_step_id]
+
+    @property
+    def current_message_snapshot(self) -> Message | None:
+        return self.__current_message_snapshot
+
+    async def until_done(self) -> None:
+        """Waits until the stream has been consumed"""
+        await consume_async_iterator(self)
+
+    async def get_final_run(self) -> Run:
+        """Wait for the stream to finish and return the completed Run object"""
+        await self.until_done()
+
+        if not self.__current_run:
+            raise RuntimeError("No final run object found")
+
+        return self.__current_run
+
+    async def get_final_run_steps(self) -> list[RunStep]:
+        """Wait for the stream to finish and return the steps taken in this run"""
+        await self.until_done()
+
+        if not self.__run_step_snapshots:
+            raise RuntimeError("No run steps found")
+
+        return [step for step in self.__run_step_snapshots.values()]
+
+    async def get_final_messages(self) -> list[Message]:
+        """Wait for the stream to finish and return the messages emitted in this run"""
+        await self.until_done()
+
+        if not self.__message_snapshots:
+            raise RuntimeError("No messages found")
+
+        return [message for message in self.__message_snapshots.values()]
+
+    async def __text_deltas__(self) -> AsyncIterator[str]:
+        async for event in self:
+            if event.event != "thread.message.delta":
+                continue
+
+            for content_delta in event.data.delta.content or []:
+                if content_delta.type == "text" and content_delta.text and content_delta.text.value:
+                    yield content_delta.text.value
+
+    # event handlers
+
+    async def on_end(self) -> None:
+        """Fires when the stream has finished.
+
+        This happens if the stream is read to completion
+        or if an exception occurs during iteration.
+        """
+
+    async def on_event(self, event: AssistantStreamEvent) -> None:
+        """Callback that is fired for every Server-Sent-Event"""
+
+    async def on_run_step_created(self, run_step: RunStep) -> None:
+        """Callback that is fired when a run step is created"""
+
+    async def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:
+        """Callback that is fired whenever a run step delta is returned from the API
+
+        The first argument is just the delta as sent by the API and the second argument
+        is the accumulated snapshot of the run step. For example, a tool calls event may
+        look like this:
+
+        # delta
+        tool_calls=[
+            RunStepDeltaToolCallsCodeInterpreter(
+                index=0,
+                type='code_interpreter',
+                id=None,
+                code_interpreter=CodeInterpreter(input=' sympy', outputs=None)
+            )
+        ]
+        # snapshot
+        tool_calls=[
+            CodeToolCall(
+                id='call_wKayJlcYV12NiadiZuJXxcfx',
+                code_interpreter=CodeInterpreter(input='from sympy', outputs=[]),
+                type='code_interpreter',
+                index=0
+            )
+        ],
+        """
+
+    async def on_run_step_done(self, run_step: RunStep) -> None:
+        """Callback that is fired when a run step is completed"""
+
+    async def on_tool_call_created(self, tool_call: ToolCall) -> None:
+        """Callback that is fired when a tool call is created"""
+
+    async def on_tool_call_delta(self, delta: ToolCallDelta, snapshot: ToolCall) -> None:
+        """Callback that is fired when a tool call delta is encountered"""
+
+    async def on_tool_call_done(self, tool_call: ToolCall) -> None:
+        """Callback that is fired when a tool call is completed"""
+
+    async def on_exception(self, exception: Exception) -> None:
+        """Fired whenever an exception happens during streaming"""
+
+    async def on_timeout(self) -> None:
+        """Fires if the request times out"""
+
+    async def on_message_created(self, message: Message) -> None:
+        """Callback that is fired when a message is created"""
+
+    async def on_message_delta(self, delta: MessageDelta, snapshot: Message) -> None:
+        """Callback that is fired whenever a message delta is returned from the API
+
+        The first argument is just the delta as sent by the API and the second argument
+        is the accumulated snapshot of the message. For example, a text content event may
+        look like this:
+
+        # delta
+        MessageDeltaText(
+            index=0,
+            type='text',
+            text=Text(
+                value=' Jane'
+            ),
+        )
+        # snapshot
+        MessageContentText(
+            index=0,
+            type='text',
+            text=Text(
+                value='Certainly, Jane'
+            ),
+        )
+        """
+
+    async def on_message_done(self, message: Message) -> None:
+        """Callback that is fired when a message is completed"""
+
+    async def on_text_created(self, text: Text) -> None:
+        """Callback that is fired when a text content block is created"""
+
+    async def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
+        """Callback that is fired whenever a text content delta is returned
+        by the API.
+
+        The first argument is just the delta as sent by the API and the second argument
+        is the accumulated snapshot of the text. For example:
+
+        on_text_delta(TextDelta(value="The"), Text(value="The")),
+        on_text_delta(TextDelta(value=" solution"), Text(value="The solution")),
+        on_text_delta(TextDelta(value=" to"), Text(value="The solution to")),
+        on_text_delta(TextDelta(value=" the"), Text(value="The solution to the")),
+        on_text_delta(TextDelta(value=" equation"), Text(value="The solution to the equivalent")),
+        """
+
+    async def on_text_done(self, text: Text) -> None:
+        """Callback that is fired when a text content block is finished"""
+
+    async def on_image_file_done(self, image_file: ImageFile) -> None:
+        """Callback that is fired when an image file block is finished"""
+
+    async def _emit_sse_event(self, event: AssistantStreamEvent) -> None:
+        self._current_event = event
+        await self.on_event(event)
+
+        self.__current_message_snapshot, new_content = accumulate_event(
+            event=event,
+            current_message_snapshot=self.__current_message_snapshot,
+        )
+        if self.__current_message_snapshot is not None:
+            self.__message_snapshots[self.__current_message_snapshot.id] = self.__current_message_snapshot
+
+        accumulate_run_step(
+            event=event,
+            run_step_snapshots=self.__run_step_snapshots,
+        )
+
+        for content_delta in new_content:
+            assert self.__current_message_snapshot is not None
+
+            block = self.__current_message_snapshot.content[content_delta.index]
+            if block.type == "text":
+                await self.on_text_created(block.text)
+
+        if (
+            event.event == "thread.run.completed"
+            or event.event == "thread.run.cancelled"
+            or event.event == "thread.run.expired"
+            or event.event == "thread.run.failed"
+            or event.event == "thread.run.requires_action"
+            or event.event == "thread.run.incomplete"
+        ):
+            self.__current_run = event.data
+            if self._current_tool_call:
+                await self.on_tool_call_done(self._current_tool_call)
+        elif (
+            event.event == "thread.run.created"
+            or event.event == "thread.run.in_progress"
+            or event.event == "thread.run.cancelling"
+            or event.event == "thread.run.queued"
+        ):
+            self.__current_run = event.data
+        elif event.event == "thread.message.created":
+            await self.on_message_created(event.data)
+        elif event.event == "thread.message.delta":
+            snapshot = self.__current_message_snapshot
+            assert snapshot is not None
+
+            message_delta = event.data.delta
+            if message_delta.content is not None:
+                for content_delta in message_delta.content:
+                    if content_delta.type == "text" and content_delta.text:
+                        snapshot_content = snapshot.content[content_delta.index]
+                        assert snapshot_content.type == "text"
+                        await self.on_text_delta(content_delta.text, snapshot_content.text)
+
+                    # If the delta is for a new message content:
+                    # - emit on_text_done/on_image_file_done for the previous message content
+                    # - emit on_text_created/on_image_created for the new message content
+                    if content_delta.index != self._current_message_content_index:
+                        if self._current_message_content is not None:
+                            if self._current_message_content.type == "text":
+                                await self.on_text_done(self._current_message_content.text)
+                            elif self._current_message_content.type == "image_file":
+                                await self.on_image_file_done(self._current_message_content.image_file)
+
+                        self._current_message_content_index = content_delta.index
+                        self._current_message_content = snapshot.content[content_delta.index]
+
+                    # Update the current_message_content (delta event is correctly emitted already)
+                    self._current_message_content = snapshot.content[content_delta.index]
+
+            await self.on_message_delta(event.data.delta, snapshot)
+        elif event.event == "thread.message.completed" or event.event == "thread.message.incomplete":
+            self.__current_message_snapshot = event.data
+            self.__message_snapshots[event.data.id] = event.data
+
+            if self._current_message_content_index is not None:
+                content = event.data.content[self._current_message_content_index]
+                if content.type == "text":
+                    await self.on_text_done(content.text)
+                elif content.type == "image_file":
+                    await self.on_image_file_done(content.image_file)
+
+            await self.on_message_done(event.data)
+        elif event.event == "thread.run.step.created":
+            self.__current_run_step_id = event.data.id
+            await self.on_run_step_created(event.data)
+        elif event.event == "thread.run.step.in_progress":
+            self.__current_run_step_id = event.data.id
+        elif event.event == "thread.run.step.delta":
+            step_snapshot = self.__run_step_snapshots[event.data.id]
+
+            run_step_delta = event.data.delta
+            if (
+                run_step_delta.step_details
+                and run_step_delta.step_details.type == "tool_calls"
+                and run_step_delta.step_details.tool_calls is not None
+            ):
+                assert step_snapshot.step_details.type == "tool_calls"
+                for tool_call_delta in run_step_delta.step_details.tool_calls:
+                    if tool_call_delta.index == self._current_tool_call_index:
+                        await self.on_tool_call_delta(
+                            tool_call_delta,
+                            step_snapshot.step_details.tool_calls[tool_call_delta.index],
+                        )
+
+                    # If the delta is for a new tool call:
+                    # - emit on_tool_call_done for the previous tool_call
+                    # - emit on_tool_call_created for the new tool_call
+                    if tool_call_delta.index != self._current_tool_call_index:
+                        if self._current_tool_call is not None:
+                            await self.on_tool_call_done(self._current_tool_call)
+
+                        self._current_tool_call_index = tool_call_delta.index
+                        self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index]
+                        await self.on_tool_call_created(self._current_tool_call)
+
+                    # Update the current_tool_call (delta event is correctly emitted already)
+                    self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index]
+
+            await self.on_run_step_delta(
+                event.data.delta,
+                step_snapshot,
+            )
+        elif (
+            event.event == "thread.run.step.completed"
+            or event.event == "thread.run.step.cancelled"
+            or event.event == "thread.run.step.expired"
+            or event.event == "thread.run.step.failed"
+        ):
+            if self._current_tool_call:
+                await self.on_tool_call_done(self._current_tool_call)
+
+            await self.on_run_step_done(event.data)
+            self.__current_run_step_id = None
+        elif event.event == "thread.created" or event.event == "thread.message.in_progress" or event.event == "error":
+            # currently no special handling
+            ...
+        else:
+            # we only want to error at build-time
+            if TYPE_CHECKING:  # type: ignore[unreachable]
+                assert_never(event)
+
+        self._current_event = None
+
+    async def __stream__(self) -> AsyncIterator[AssistantStreamEvent]:
+        stream = self.__stream
+        if not stream:
+            raise RuntimeError("Stream has not been started yet")
+
+        try:
+            async for event in stream:
+                await self._emit_sse_event(event)
+
+                yield event
+        except (httpx.TimeoutException, asyncio.TimeoutError) as exc:
+            await self.on_timeout()
+            await self.on_exception(exc)
+            raise
+        except Exception as exc:
+            await self.on_exception(exc)
+            raise
+        finally:
+            await self.on_end()
+
+
+AsyncAssistantEventHandlerT = TypeVar("AsyncAssistantEventHandlerT", bound=AsyncAssistantEventHandler)
+
+
+class AsyncAssistantStreamManager(Generic[AsyncAssistantEventHandlerT]):
+    """Wrapper over AsyncAssistantStreamEventHandler that is returned by `.stream()`
+    so that an async context manager can be used without `await`ing the
+    original client call.
+
+    ```py
+    async with client.threads.create_and_run_stream(...) as stream:
+        async for event in stream:
+            ...
+    ```
+    """
+
+    def __init__(
+        self,
+        api_request: Awaitable[AsyncStream[AssistantStreamEvent]],
+        *,
+        event_handler: AsyncAssistantEventHandlerT,
+    ) -> None:
+        self.__stream: AsyncStream[AssistantStreamEvent] | None = None
+        self.__event_handler = event_handler
+        self.__api_request = api_request
+
+    async def __aenter__(self) -> AsyncAssistantEventHandlerT:
+        self.__stream = await self.__api_request
+        self.__event_handler._init(self.__stream)
+        return self.__event_handler
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            await self.__stream.close()
+
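+# --- Usage sketch (not part of the upstream module) --------------------------
+# A minimal example of pairing a custom handler with the stream manager above.
+# It relies on the names imported at the top of this module (TextDelta, Text).
+# The client path mirrors the docstring (`client.threads.create_and_run_stream`);
+# installed versions may expose it under a `beta` namespace, and the
+# `event_handler=` keyword is assumed from the manager's constructor.
+#
+#     import asyncio
+#     from openai import AsyncOpenAI
+#
+#     class PrintingHandler(AsyncAssistantEventHandler):
+#         async def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
+#             print(delta.value or "", end="", flush=True)
+#
+#     async def main() -> None:
+#         client = AsyncOpenAI()
+#         async with client.threads.create_and_run_stream(
+#             ...,  # request arguments elided, see the docstring above
+#             event_handler=PrintingHandler(),
+#         ) as stream:
+#             async for _event in stream:
+#                 ...
+#
+#     asyncio.run(main())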
+
+def accumulate_run_step(
+    *,
+    event: AssistantStreamEvent,
+    run_step_snapshots: dict[str, RunStep],
+) -> None:
+    if event.event == "thread.run.step.created":
+        run_step_snapshots[event.data.id] = event.data
+        return
+
+    if event.event == "thread.run.step.delta":
+        data = event.data
+        snapshot = run_step_snapshots[data.id]
+
+        if data.delta:
+            merged = accumulate_delta(
+                cast(
+                    "dict[object, object]",
+                    model_dump(snapshot, exclude_unset=True, warnings=False),
+                ),
+                cast(
+                    "dict[object, object]",
+                    model_dump(data.delta, exclude_unset=True, warnings=False),
+                ),
+            )
+            run_step_snapshots[snapshot.id] = cast(RunStep, construct_type(type_=RunStep, value=merged))
+
+    return None
+
+
+def accumulate_event(
+    *,
+    event: AssistantStreamEvent,
+    current_message_snapshot: Message | None,
+) -> tuple[Message | None, list[MessageContentDelta]]:
+    """Returns a tuple of message snapshot and newly created text message deltas"""
+    if event.event == "thread.message.created":
+        return event.data, []
+
+    new_content: list[MessageContentDelta] = []
+
+    if event.event != "thread.message.delta":
+        return current_message_snapshot, []
+
+    if not current_message_snapshot:
+        raise RuntimeError("Encountered a message delta with no previous snapshot")
+
+    data = event.data
+    if data.delta.content:
+        for content_delta in data.delta.content:
+            try:
+                block = current_message_snapshot.content[content_delta.index]
+            except IndexError:
+                current_message_snapshot.content.insert(
+                    content_delta.index,
+                    cast(
+                        MessageContent,
+                        construct_type(
+                            # mypy doesn't allow Content for some reason
+                            type_=cast(Any, MessageContent),
+                            value=model_dump(content_delta, exclude_unset=True, warnings=False),
+                        ),
+                    ),
+                )
+                new_content.append(content_delta)
+            else:
+                merged = accumulate_delta(
+                    cast(
+                        "dict[object, object]",
+                        model_dump(block, exclude_unset=True, warnings=False),
+                    ),
+                    cast(
+                        "dict[object, object]",
+                        model_dump(content_delta, exclude_unset=True, warnings=False),
+                    ),
+                )
+                current_message_snapshot.content[content_delta.index] = cast(
+                    MessageContent,
+                    construct_type(
+                        # mypy doesn't allow Content for some reason
+                        type_=cast(Any, MessageContent),
+                        value=merged,
+                    ),
+                )
+
+    return current_message_snapshot, new_content
+
+
+def accumulate_delta(acc: dict[object, object], delta: dict[object, object]) -> dict[object, object]:
+    for key, delta_value in delta.items():
+        if key not in acc:
+            acc[key] = delta_value
+            continue
+
+        acc_value = acc[key]
+        if acc_value is None:
+            acc[key] = delta_value
+            continue
+
+        # the `index` property is used in arrays of objects so it should
+        # not be accumulated like other values e.g.
+        # [{'foo': 'bar', 'index': 0}]
+        #
+        # the same applies to `type` properties as they're used for
+        # discriminated unions
+        if key == "index" or key == "type":
+            acc[key] = delta_value
+            continue
+
+        if isinstance(acc_value, str) and isinstance(delta_value, str):
+            acc_value += delta_value
+        elif isinstance(acc_value, (int, float)) and isinstance(delta_value, (int, float)):
+            acc_value += delta_value
+        elif is_dict(acc_value) and is_dict(delta_value):
+            acc_value = accumulate_delta(acc_value, delta_value)
+        elif is_list(acc_value) and is_list(delta_value):
+            # for lists of non-dictionary items we'll only ever get new entries
+            # in the array, existing entries will never be changed
+            if all(isinstance(x, (str, int, float)) for x in acc_value):
+                acc_value.extend(delta_value)
+                continue
+
+            for delta_entry in delta_value:
+                if not is_dict(delta_entry):
+                    raise TypeError(f"Unexpected list delta entry is not a dictionary: {delta_entry}")
+
+                try:
+                    index = delta_entry["index"]
+                except KeyError as exc:
+                    raise RuntimeError(f"Expected list delta entry to have an `index` key; {delta_entry}") from exc
+
+                if not isinstance(index, int):
+                    raise TypeError(f"Unexpected, list delta entry `index` value is not an integer; {index}")
+
+                try:
+                    acc_entry = acc_value[index]
+                except IndexError:
+                    acc_value.insert(index, delta_entry)
+                else:
+                    if not is_dict(acc_entry):
+                        raise TypeError("not handled yet")
+
+                    acc_value[index] = accumulate_delta(acc_entry, delta_entry)
+
+        acc[key] = acc_value
+
+    return acc
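+
+# --- Usage sketch (not part of the upstream module) --------------------------
+# Illustrates the merge rules implemented above: string values are
+# concatenated, `index`/`type` keys are overwritten rather than accumulated,
+# nested dicts are merged recursively, and list entries are matched by their
+# `index` key.
+#
+#     acc = {"index": 0, "type": "text", "text": {"value": "The sol"}}
+#     delta = {"index": 0, "type": "text", "text": {"value": "ution"}}
+#     accumulate_delta(acc, delta)
+#     assert acc["text"] == {"value": "The solution"}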
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/_deltas.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/_deltas.py
new file mode 100644
index 00000000..a5e13176
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/_deltas.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from ..._utils import is_dict, is_list
+
+
+def accumulate_delta(acc: dict[object, object], delta: dict[object, object]) -> dict[object, object]:
+    for key, delta_value in delta.items():
+        if key not in acc:
+            acc[key] = delta_value
+            continue
+
+        acc_value = acc[key]
+        if acc_value is None:
+            acc[key] = delta_value
+            continue
+
+        # the `index` property is used in arrays of objects so it should
+        # not be accumulated like other values e.g.
+        # [{'foo': 'bar', 'index': 0}]
+        #
+        # the same applies to `type` properties as they're used for
+        # discriminated unions
+        if key == "index" or key == "type":
+            acc[key] = delta_value
+            continue
+
+        if isinstance(acc_value, str) and isinstance(delta_value, str):
+            acc_value += delta_value
+        elif isinstance(acc_value, (int, float)) and isinstance(delta_value, (int, float)):
+            acc_value += delta_value
+        elif is_dict(acc_value) and is_dict(delta_value):
+            acc_value = accumulate_delta(acc_value, delta_value)
+        elif is_list(acc_value) and is_list(delta_value):
+            # for lists of non-dictionary items we'll only ever get new entries
+            # in the array, existing entries will never be changed
+            if all(isinstance(x, (str, int, float)) for x in acc_value):
+                acc_value.extend(delta_value)
+                continue
+
+            for delta_entry in delta_value:
+                if not is_dict(delta_entry):
+                    raise TypeError(f"Unexpected list delta entry is not a dictionary: {delta_entry}")
+
+                try:
+                    index = delta_entry["index"]
+                except KeyError as exc:
+                    raise RuntimeError(f"Expected list delta entry to have an `index` key; {delta_entry}") from exc
+
+                if not isinstance(index, int):
+                    raise TypeError(f"Unexpected, list delta entry `index` value is not an integer; {index}")
+
+                try:
+                    acc_entry = acc_value[index]
+                except IndexError:
+                    acc_value.insert(index, delta_entry)
+                else:
+                    if not is_dict(acc_entry):
+                        raise TypeError("not handled yet")
+
+                    acc_value[index] = accumulate_delta(acc_entry, delta_entry)
+
+        acc[key] = acc_value
+
+    return acc
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/__init__.py
new file mode 100644
index 00000000..dfa3f3f2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/__init__.py
@@ -0,0 +1,27 @@
+from ._types import (
+    ParsedChoiceSnapshot as ParsedChoiceSnapshot,
+    ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
+    ParsedChatCompletionMessageSnapshot as ParsedChatCompletionMessageSnapshot,
+)
+from ._events import (
+    ChunkEvent as ChunkEvent,
+    ContentDoneEvent as ContentDoneEvent,
+    RefusalDoneEvent as RefusalDoneEvent,
+    ContentDeltaEvent as ContentDeltaEvent,
+    RefusalDeltaEvent as RefusalDeltaEvent,
+    LogprobsContentDoneEvent as LogprobsContentDoneEvent,
+    LogprobsRefusalDoneEvent as LogprobsRefusalDoneEvent,
+    ChatCompletionStreamEvent as ChatCompletionStreamEvent,
+    LogprobsContentDeltaEvent as LogprobsContentDeltaEvent,
+    LogprobsRefusalDeltaEvent as LogprobsRefusalDeltaEvent,
+    ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
+    FunctionToolCallArgumentsDoneEvent as FunctionToolCallArgumentsDoneEvent,
+    FunctionToolCallArgumentsDeltaEvent as FunctionToolCallArgumentsDeltaEvent,
+)
+from ._completions import (
+    ChatCompletionStream as ChatCompletionStream,
+    AsyncChatCompletionStream as AsyncChatCompletionStream,
+    ChatCompletionStreamState as ChatCompletionStreamState,
+    ChatCompletionStreamManager as ChatCompletionStreamManager,
+    AsyncChatCompletionStreamManager as AsyncChatCompletionStreamManager,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_completions.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_completions.py
new file mode 100644
index 00000000..21460913
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_completions.py
@@ -0,0 +1,755 @@
+from __future__ import annotations
+
+import inspect
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Generic, Callable, Iterable, Awaitable, AsyncIterator, cast
+from typing_extensions import Self, Iterator, assert_never
+
+from jiter import from_json
+
+from ._types import ParsedChoiceSnapshot, ParsedChatCompletionSnapshot, ParsedChatCompletionMessageSnapshot
+from ._events import (
+    ChunkEvent,
+    ContentDoneEvent,
+    RefusalDoneEvent,
+    ContentDeltaEvent,
+    RefusalDeltaEvent,
+    LogprobsContentDoneEvent,
+    LogprobsRefusalDoneEvent,
+    ChatCompletionStreamEvent,
+    LogprobsContentDeltaEvent,
+    LogprobsRefusalDeltaEvent,
+    FunctionToolCallArgumentsDoneEvent,
+    FunctionToolCallArgumentsDeltaEvent,
+)
+from .._deltas import accumulate_delta
+from ...._types import NOT_GIVEN, IncEx, NotGiven
+from ...._utils import is_given, consume_sync_iterator, consume_async_iterator
+from ...._compat import model_dump
+from ...._models import build, construct_type
+from ..._parsing import (
+    ResponseFormatT,
+    has_parseable_input,
+    maybe_parse_content,
+    parse_chat_completion,
+    get_input_tool_by_name,
+    solve_response_format_t,
+    parse_function_tool_arguments,
+)
+from ...._streaming import Stream, AsyncStream
+from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam
+from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
+from ....types.chat.chat_completion import ChoiceLogprobs
+from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk
+from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+
+
+class ChatCompletionStream(Generic[ResponseFormatT]):
+    """Wrapper over the Chat Completions streaming API that adds helpful
+    events such as `content.done`, supports automatically parsing
+    responses & tool calls and accumulates a `ChatCompletion` object
+    from each individual chunk.
+
+    https://platform.openai.com/docs/api-reference/streaming
+    """
+
+    def __init__(
+        self,
+        *,
+        raw_stream: Stream[ChatCompletionChunk],
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
+
+    def __next__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
+        return self._iterator.__next__()
+
+    def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        for item in self._iterator:
+            yield item
+
+    def __enter__(self) -> Self:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.close()
+
+    def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        self._response.close()
+
+    def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedChatCompletion` object.
+
+        If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
+        property will be the content deserialised into that class, if there was any content returned
+        by the API.
+        """
+        self.until_done()
+        return self._state.get_final_completion()
+
+    def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        consume_sync_iterator(self)
+        return self
+
+    @property
+    def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+        return self._state.current_completion_snapshot
+
+    def __stream__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_chunk(sse_event)
+            for event in events_to_fire:
+                yield event
+
+
+class ChatCompletionStreamManager(Generic[ResponseFormatT]):
+    """Context manager over a `ChatCompletionStream` that is returned by `.stream()`.
+
+    This context manager ensures the response cannot be leaked if you don't read
+    the stream to completion.
+
+    Usage:
+    ```py
+    with client.beta.chat.completions.stream(...) as stream:
+        for event in stream:
+            ...
+    ```
+    """
+
+    def __init__(
+        self,
+        api_request: Callable[[], Stream[ChatCompletionChunk]],
+        *,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: ChatCompletionStream[ResponseFormatT] | None = None
+        self.__api_request = api_request
+        self.__response_format = response_format
+        self.__input_tools = input_tools
+
+    def __enter__(self) -> ChatCompletionStream[ResponseFormatT]:
+        raw_stream = self.__api_request()
+
+        self.__stream = ChatCompletionStream(
+            raw_stream=raw_stream,
+            response_format=self.__response_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            self.__stream.close()
+
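+# --- Usage sketch (not part of the upstream module) --------------------------
+# Consuming the typed events produced by `ChatCompletionStream` via the manager
+# above.  Model name and messages are placeholders; the entry point follows the
+# docstring (`client.beta.chat.completions.stream`).
+#
+#     from openai import OpenAI
+#
+#     client = OpenAI()
+#     with client.beta.chat.completions.stream(
+#         model="gpt-4o",  # placeholder model name
+#         messages=[{"role": "user", "content": "Say hello"}],
+#     ) as stream:
+#         for event in stream:
+#             if event.type == "content.delta":
+#                 print(event.delta, end="")
+#             elif event.type == "content.done":
+#                 print()
+#         completion = stream.get_final_completion()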
+
+class AsyncChatCompletionStream(Generic[ResponseFormatT]):
+    """Wrapper over the Chat Completions streaming API that adds helpful
+    events such as `content.done`, supports automatically parsing
+    responses & tool calls and accumulates a `ChatCompletion` object
+    from each individual chunk.
+
+    https://platform.openai.com/docs/api-reference/streaming
+    """
+
+    def __init__(
+        self,
+        *,
+        raw_stream: AsyncStream[ChatCompletionChunk],
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
+
+    async def __anext__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
+        return await self._iterator.__anext__()
+
+    async def __aiter__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        async for item in self._iterator:
+            yield item
+
+    async def __aenter__(self) -> Self:
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        await self.close()
+
+    async def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self._response.aclose()
+
+    async def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedChatCompletion` object.
+
+        If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
+        property will be the content deserialised into that class, if there was any content returned
+        by the API.
+        """
+        await self.until_done()
+        return self._state.get_final_completion()
+
+    async def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        await consume_async_iterator(self)
+        return self
+
+    @property
+    def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+        return self._state.current_completion_snapshot
+
+    async def __stream__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        async for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_chunk(sse_event)
+            for event in events_to_fire:
+                yield event
+
+
+class AsyncChatCompletionStreamManager(Generic[ResponseFormatT]):
+    """Context manager over a `AsyncChatCompletionStream` that is returned by `.stream()`.
+
+    This context manager ensures the response cannot be leaked if you don't read
+    the stream to completion.
+
+    Usage:
+    ```py
+    async with client.beta.chat.completions.stream(...) as stream:
+        async for event in stream:
+            ...
+    ```
+    """
+
+    def __init__(
+        self,
+        api_request: Awaitable[AsyncStream[ChatCompletionChunk]],
+        *,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None
+        self.__api_request = api_request
+        self.__response_format = response_format
+        self.__input_tools = input_tools
+
+    async def __aenter__(self) -> AsyncChatCompletionStream[ResponseFormatT]:
+        raw_stream = await self.__api_request
+
+        self.__stream = AsyncChatCompletionStream(
+            raw_stream=raw_stream,
+            response_format=self.__response_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            await self.__stream.close()
+
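+# --- Usage sketch (not part of the upstream module) --------------------------
+# Passing a class as `response_format` so that the final completion carries a
+# parsed instance, as described by `get_final_completion` above.  The pydantic
+# model, model name and messages are placeholders, and pydantic support for
+# `response_format` is assumed from the surrounding parse helpers.
+#
+#     import asyncio
+#     from pydantic import BaseModel
+#     from openai import AsyncOpenAI
+#
+#     class Answer(BaseModel):
+#         value: int
+#
+#     async def main() -> None:
+#         client = AsyncOpenAI()
+#         async with client.beta.chat.completions.stream(
+#             model="gpt-4o",  # placeholder model name
+#             messages=[{"role": "user", "content": "What is 2 + 2?"}],
+#             response_format=Answer,
+#         ) as stream:
+#             async for _event in stream:
+#                 ...
+#             completion = await stream.get_final_completion()
+#             print(completion.choices[0].message.parsed)  # Answer instance or None
+#
+#     asyncio.run(main())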
+
+class ChatCompletionStreamState(Generic[ResponseFormatT]):
+    """Helper class for manually accumulating `ChatCompletionChunk`s into a final `ChatCompletion` object.
+
+    This is useful in cases where you can't always use the `.stream()` method, e.g.
+
+    ```py
+    from openai.lib.streaming.chat import ChatCompletionStreamState
+
+    state = ChatCompletionStreamState()
+
+    stream = client.chat.completions.create(..., stream=True)
+    for chunk in stream:
+        state.handle_chunk(chunk)
+
+        # can also access the accumulated `ChatCompletion` mid-stream
+        state.current_completion_snapshot
+
+    print(state.get_final_completion())
+    ```
+    """
+
+    def __init__(
+        self,
+        *,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven = NOT_GIVEN,
+    ) -> None:
+        self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None
+        self.__choice_event_states: list[ChoiceEventState] = []
+
+        self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else []
+        self._response_format = response_format
+        self._rich_response_format: type | NotGiven = response_format if inspect.isclass(response_format) else NOT_GIVEN
+
+    def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+        """Parse the final completion object.
+
+        Note this does not provide any guarantees that the stream has actually finished; you must
+        only call this method when the stream is finished.
+        """
+        return parse_chat_completion(
+            chat_completion=self.current_completion_snapshot,
+            response_format=self._rich_response_format,
+            input_tools=self._input_tools,
+        )
+
+    @property
+    def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+        assert self.__current_completion_snapshot is not None
+        return self.__current_completion_snapshot
+
+    def handle_chunk(self, chunk: ChatCompletionChunk) -> Iterable[ChatCompletionStreamEvent[ResponseFormatT]]:
+        """Accumulate a new chunk into the snapshot and returns an iterable of events to yield."""
+        self.__current_completion_snapshot = self._accumulate_chunk(chunk)
+
+        return self._build_events(
+            chunk=chunk,
+            completion_snapshot=self.__current_completion_snapshot,
+        )
+
+    def _get_choice_state(self, choice: ChoiceChunk) -> ChoiceEventState:
+        try:
+            return self.__choice_event_states[choice.index]
+        except IndexError:
+            choice_state = ChoiceEventState(input_tools=self._input_tools)
+            self.__choice_event_states.append(choice_state)
+            return choice_state
+
+    def _accumulate_chunk(self, chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
+        completion_snapshot = self.__current_completion_snapshot
+
+        if completion_snapshot is None:
+            return _convert_initial_chunk_into_snapshot(chunk)
+
+        for choice in chunk.choices:
+            try:
+                choice_snapshot = completion_snapshot.choices[choice.index]
+                previous_tool_calls = choice_snapshot.message.tool_calls or []
+
+                choice_snapshot.message = cast(
+                    ParsedChatCompletionMessageSnapshot,
+                    construct_type(
+                        type_=ParsedChatCompletionMessageSnapshot,
+                        value=accumulate_delta(
+                            cast(
+                                "dict[object, object]",
+                                model_dump(
+                                    choice_snapshot.message,
+                                    # we don't want to serialise / deserialise our custom properties
+                                    # as they won't appear in the delta and we don't want to have to
+                                    # continuously reparse the content
+                                    exclude=cast(
+                                        # cast required as mypy isn't smart enough to infer `True` here to `Literal[True]`
+                                        IncEx,
+                                        {
+                                            "parsed": True,
+                                            "tool_calls": {
+                                                idx: {"function": {"parsed_arguments": True}}
+                                                for idx, _ in enumerate(choice_snapshot.message.tool_calls or [])
+                                            },
+                                        },
+                                    ),
+                                ),
+                            ),
+                            cast("dict[object, object]", choice.delta.to_dict()),
+                        ),
+                    ),
+                )
+
+                # ensure tools that have already been parsed are added back into the newly
+                # constructed message snapshot
+                for tool_index, prev_tool in enumerate(previous_tool_calls):
+                    new_tool = (choice_snapshot.message.tool_calls or [])[tool_index]
+
+                    if prev_tool.type == "function":
+                        assert new_tool.type == "function"
+                        new_tool.function.parsed_arguments = prev_tool.function.parsed_arguments
+                    elif TYPE_CHECKING:  # type: ignore[unreachable]
+                        assert_never(prev_tool)
+            except IndexError:
+                choice_snapshot = cast(
+                    ParsedChoiceSnapshot,
+                    construct_type(
+                        type_=ParsedChoiceSnapshot,
+                        value={
+                            **choice.model_dump(exclude_unset=True, exclude={"delta"}),
+                            "message": choice.delta.to_dict(),
+                        },
+                    ),
+                )
+                completion_snapshot.choices.append(choice_snapshot)
+
+            if choice.finish_reason:
+                choice_snapshot.finish_reason = choice.finish_reason
+
+                if has_parseable_input(response_format=self._response_format, input_tools=self._input_tools):
+                    if choice.finish_reason == "length":
+                        # at the time of writing, `.usage` will always be `None` but
+                        # we include it here in case that is changed in the future
+                        raise LengthFinishReasonError(completion=completion_snapshot)
+
+                    if choice.finish_reason == "content_filter":
+                        raise ContentFilterFinishReasonError()
+
+            if (
+                choice_snapshot.message.content
+                and not choice_snapshot.message.refusal
+                and is_given(self._rich_response_format)
+            ):
+                choice_snapshot.message.parsed = from_json(
+                    bytes(choice_snapshot.message.content, "utf-8"),
+                    partial_mode=True,
+                )
+
+            for tool_call_chunk in choice.delta.tool_calls or []:
+                tool_call_snapshot = (choice_snapshot.message.tool_calls or [])[tool_call_chunk.index]
+
+                if tool_call_snapshot.type == "function":
+                    input_tool = get_input_tool_by_name(
+                        input_tools=self._input_tools, name=tool_call_snapshot.function.name
+                    )
+
+                    if (
+                        input_tool
+                        and input_tool.get("function", {}).get("strict")
+                        and tool_call_snapshot.function.arguments
+                    ):
+                        tool_call_snapshot.function.parsed_arguments = from_json(
+                            bytes(tool_call_snapshot.function.arguments, "utf-8"),
+                            partial_mode=True,
+                        )
+                elif TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(tool_call_snapshot)
+
+            if choice.logprobs is not None:
+                if choice_snapshot.logprobs is None:
+                    choice_snapshot.logprobs = build(
+                        ChoiceLogprobs,
+                        content=choice.logprobs.content,
+                        refusal=choice.logprobs.refusal,
+                    )
+                else:
+                    if choice.logprobs.content:
+                        if choice_snapshot.logprobs.content is None:
+                            choice_snapshot.logprobs.content = []
+
+                        choice_snapshot.logprobs.content.extend(choice.logprobs.content)
+
+                    if choice.logprobs.refusal:
+                        if choice_snapshot.logprobs.refusal is None:
+                            choice_snapshot.logprobs.refusal = []
+
+                        choice_snapshot.logprobs.refusal.extend(choice.logprobs.refusal)
+
+        completion_snapshot.usage = chunk.usage
+        completion_snapshot.system_fingerprint = chunk.system_fingerprint
+
+        return completion_snapshot
+
+    def _build_events(
+        self,
+        *,
+        chunk: ChatCompletionChunk,
+        completion_snapshot: ParsedChatCompletionSnapshot,
+    ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+        events_to_fire.append(
+            build(ChunkEvent, type="chunk", chunk=chunk, snapshot=completion_snapshot),
+        )
+
+        for choice in chunk.choices:
+            choice_state = self._get_choice_state(choice)
+            choice_snapshot = completion_snapshot.choices[choice.index]
+
+            if choice.delta.content is not None and choice_snapshot.message.content is not None:
+                events_to_fire.append(
+                    build(
+                        ContentDeltaEvent,
+                        type="content.delta",
+                        delta=choice.delta.content,
+                        snapshot=choice_snapshot.message.content,
+                        parsed=choice_snapshot.message.parsed,
+                    )
+                )
+
+            if choice.delta.refusal is not None and choice_snapshot.message.refusal is not None:
+                events_to_fire.append(
+                    build(
+                        RefusalDeltaEvent,
+                        type="refusal.delta",
+                        delta=choice.delta.refusal,
+                        snapshot=choice_snapshot.message.refusal,
+                    )
+                )
+
+            if choice.delta.tool_calls:
+                tool_calls = choice_snapshot.message.tool_calls
+                assert tool_calls is not None
+
+                for tool_call_delta in choice.delta.tool_calls:
+                    tool_call = tool_calls[tool_call_delta.index]
+
+                    if tool_call.type == "function":
+                        assert tool_call_delta.function is not None
+                        events_to_fire.append(
+                            build(
+                                FunctionToolCallArgumentsDeltaEvent,
+                                type="tool_calls.function.arguments.delta",
+                                name=tool_call.function.name,
+                                index=tool_call_delta.index,
+                                arguments=tool_call.function.arguments,
+                                parsed_arguments=tool_call.function.parsed_arguments,
+                                arguments_delta=tool_call_delta.function.arguments or "",
+                            )
+                        )
+                    elif TYPE_CHECKING:  # type: ignore[unreachable]
+                        assert_never(tool_call)
+
+            if choice.logprobs is not None and choice_snapshot.logprobs is not None:
+                if choice.logprobs.content and choice_snapshot.logprobs.content:
+                    events_to_fire.append(
+                        build(
+                            LogprobsContentDeltaEvent,
+                            type="logprobs.content.delta",
+                            content=choice.logprobs.content,
+                            snapshot=choice_snapshot.logprobs.content,
+                        ),
+                    )
+
+                if choice.logprobs.refusal and choice_snapshot.logprobs.refusal:
+                    events_to_fire.append(
+                        build(
+                            LogprobsRefusalDeltaEvent,
+                            type="logprobs.refusal.delta",
+                            refusal=choice.logprobs.refusal,
+                            snapshot=choice_snapshot.logprobs.refusal,
+                        ),
+                    )
+
+            events_to_fire.extend(
+                choice_state.get_done_events(
+                    choice_chunk=choice,
+                    choice_snapshot=choice_snapshot,
+                    response_format=self._response_format,
+                )
+            )
+
+        return events_to_fire
+
+
+class ChoiceEventState:
+    def __init__(self, *, input_tools: list[ChatCompletionToolParam]) -> None:
+        self._input_tools = input_tools
+
+        self._content_done = False
+        self._refusal_done = False
+        self._logprobs_content_done = False
+        self._logprobs_refusal_done = False
+        self._done_tool_calls: set[int] = set()
+        self.__current_tool_call_index: int | None = None
+
+    def get_done_events(
+        self,
+        *,
+        choice_chunk: ChoiceChunk,
+        choice_snapshot: ParsedChoiceSnapshot,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+    ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+        if choice_snapshot.finish_reason:
+            events_to_fire.extend(
+                self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
+            )
+
+            if (
+                self.__current_tool_call_index is not None
+                and self.__current_tool_call_index not in self._done_tool_calls
+            ):
+                self._add_tool_done_event(
+                    events_to_fire=events_to_fire,
+                    choice_snapshot=choice_snapshot,
+                    tool_index=self.__current_tool_call_index,
+                )
+
+        for tool_call in choice_chunk.delta.tool_calls or []:
+            if self.__current_tool_call_index != tool_call.index:
+                events_to_fire.extend(
+                    self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
+                )
+
+                if self.__current_tool_call_index is not None:
+                    self._add_tool_done_event(
+                        events_to_fire=events_to_fire,
+                        choice_snapshot=choice_snapshot,
+                        tool_index=self.__current_tool_call_index,
+                    )
+
+            self.__current_tool_call_index = tool_call.index
+
+        return events_to_fire
+
+    def _content_done_events(
+        self,
+        *,
+        choice_snapshot: ParsedChoiceSnapshot,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+    ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+        if choice_snapshot.message.content and not self._content_done:
+            self._content_done = True
+
+            parsed = maybe_parse_content(
+                response_format=response_format,
+                message=choice_snapshot.message,
+            )
+
+            # update the parsed content to now use the richer `response_format`
+            # as opposed to the raw JSON-parsed object as the content is now
+            # complete and can be fully validated.
+            choice_snapshot.message.parsed = parsed
+
+            events_to_fire.append(
+                build(
+                    # we do this dance so that when the `ContentDoneEvent` instance
+                    # is printed at runtime the class name will include the solved
+                    # type variable, e.g. `ContentDoneEvent[MyModelType]`
+                    cast(  # pyright: ignore[reportUnnecessaryCast]
+                        "type[ContentDoneEvent[ResponseFormatT]]",
+                        cast(Any, ContentDoneEvent)[solve_response_format_t(response_format)],
+                    ),
+                    type="content.done",
+                    content=choice_snapshot.message.content,
+                    parsed=parsed,
+                ),
+            )
+
+        if choice_snapshot.message.refusal is not None and not self._refusal_done:
+            self._refusal_done = True
+            events_to_fire.append(
+                build(RefusalDoneEvent, type="refusal.done", refusal=choice_snapshot.message.refusal),
+            )
+
+        if (
+            choice_snapshot.logprobs is not None
+            and choice_snapshot.logprobs.content is not None
+            and not self._logprobs_content_done
+        ):
+            self._logprobs_content_done = True
+            events_to_fire.append(
+                build(LogprobsContentDoneEvent, type="logprobs.content.done", content=choice_snapshot.logprobs.content),
+            )
+
+        if (
+            choice_snapshot.logprobs is not None
+            and choice_snapshot.logprobs.refusal is not None
+            and not self._logprobs_refusal_done
+        ):
+            self._logprobs_refusal_done = True
+            events_to_fire.append(
+                build(LogprobsRefusalDoneEvent, type="logprobs.refusal.done", refusal=choice_snapshot.logprobs.refusal),
+            )
+
+        return events_to_fire
+
+    def _add_tool_done_event(
+        self,
+        *,
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]],
+        choice_snapshot: ParsedChoiceSnapshot,
+        tool_index: int,
+    ) -> None:
+        if tool_index in self._done_tool_calls:
+            return
+
+        self._done_tool_calls.add(tool_index)
+
+        assert choice_snapshot.message.tool_calls is not None
+        tool_call_snapshot = choice_snapshot.message.tool_calls[tool_index]
+
+        if tool_call_snapshot.type == "function":
+            parsed_arguments = parse_function_tool_arguments(
+                input_tools=self._input_tools, function=tool_call_snapshot.function
+            )
+
+            # update the parsed content to potentially use a richer type
+            # as opposed to the raw JSON-parsed object as the content is now
+            # complete and can be fully validated.
+            tool_call_snapshot.function.parsed_arguments = parsed_arguments
+
+            events_to_fire.append(
+                build(
+                    FunctionToolCallArgumentsDoneEvent,
+                    type="tool_calls.function.arguments.done",
+                    index=tool_index,
+                    name=tool_call_snapshot.function.name,
+                    arguments=tool_call_snapshot.function.arguments,
+                    parsed_arguments=parsed_arguments,
+                )
+            )
+        elif TYPE_CHECKING:  # type: ignore[unreachable]
+            assert_never(tool_call_snapshot)
+
+
+def _convert_initial_chunk_into_snapshot(chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
+    data = chunk.to_dict()
+    choices = cast("list[object]", data["choices"])
+
+    for choice in chunk.choices:
+        choices[choice.index] = {
+            **choice.model_dump(exclude_unset=True, exclude={"delta"}),
+            "message": choice.delta.to_dict(),
+        }
+
+    return cast(
+        ParsedChatCompletionSnapshot,
+        construct_type(
+            type_=ParsedChatCompletionSnapshot,
+            value={
+                "system_fingerprint": None,
+                **data,
+                "object": "chat.completion",
+            },
+        ),
+    )
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_events.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_events.py
new file mode 100644
index 00000000..d4c1f283
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_events.py
@@ -0,0 +1,123 @@
+from typing import List, Union, Generic, Optional
+from typing_extensions import Literal
+
+from ._types import ParsedChatCompletionSnapshot
+from ...._models import BaseModel, GenericModel
+from ..._parsing import ResponseFormatT
+from ....types.chat import ChatCompletionChunk, ChatCompletionTokenLogprob
+
+
+class ChunkEvent(BaseModel):
+    type: Literal["chunk"]
+
+    chunk: ChatCompletionChunk
+
+    snapshot: ParsedChatCompletionSnapshot
+
+
+class ContentDeltaEvent(BaseModel):
+    """This event is yielded for every chunk with `choice.delta.content` data."""
+
+    type: Literal["content.delta"]
+
+    delta: str
+
+    snapshot: str
+
+    parsed: Optional[object] = None
+
+
+class ContentDoneEvent(GenericModel, Generic[ResponseFormatT]):
+    type: Literal["content.done"]
+
+    content: str
+
+    parsed: Optional[ResponseFormatT] = None
+
+
+class RefusalDeltaEvent(BaseModel):
+    type: Literal["refusal.delta"]
+
+    delta: str
+
+    snapshot: str
+
+
+class RefusalDoneEvent(BaseModel):
+    type: Literal["refusal.done"]
+
+    refusal: str
+
+
+class FunctionToolCallArgumentsDeltaEvent(BaseModel):
+    type: Literal["tool_calls.function.arguments.delta"]
+
+    name: str
+
+    index: int
+
+    arguments: str
+    """Accumulated raw JSON string"""
+
+    parsed_arguments: object
+    """The parsed arguments so far"""
+
+    arguments_delta: str
+    """The JSON string delta"""
+
+
+class FunctionToolCallArgumentsDoneEvent(BaseModel):
+    type: Literal["tool_calls.function.arguments.done"]
+
+    name: str
+
+    index: int
+
+    arguments: str
+    """Accumulated raw JSON string"""
+
+    parsed_arguments: object
+    """The parsed arguments"""
+
+
+class LogprobsContentDeltaEvent(BaseModel):
+    type: Literal["logprobs.content.delta"]
+
+    content: List[ChatCompletionTokenLogprob]
+
+    snapshot: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsContentDoneEvent(BaseModel):
+    type: Literal["logprobs.content.done"]
+
+    content: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsRefusalDeltaEvent(BaseModel):
+    type: Literal["logprobs.refusal.delta"]
+
+    refusal: List[ChatCompletionTokenLogprob]
+
+    snapshot: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsRefusalDoneEvent(BaseModel):
+    type: Literal["logprobs.refusal.done"]
+
+    refusal: List[ChatCompletionTokenLogprob]
+
+
+ChatCompletionStreamEvent = Union[
+    ChunkEvent,
+    ContentDeltaEvent,
+    ContentDoneEvent[ResponseFormatT],
+    RefusalDeltaEvent,
+    RefusalDoneEvent,
+    FunctionToolCallArgumentsDeltaEvent,
+    FunctionToolCallArgumentsDoneEvent,
+    LogprobsContentDeltaEvent,
+    LogprobsContentDoneEvent,
+    LogprobsRefusalDeltaEvent,
+    LogprobsRefusalDoneEvent,
+]
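+
+# --- Usage sketch (not part of the upstream module) --------------------------
+# Handling the function tool-call argument events defined above, assuming
+# `stream` yields ChatCompletionStreamEvent values (e.g. from
+# `client.beta.chat.completions.stream(...)`).
+#
+#     for event in stream:
+#         if event.type == "tool_calls.function.arguments.delta":
+#             # `arguments` is the accumulated raw JSON string so far,
+#             # `arguments_delta` only the newly received fragment.
+#             print(event.name, event.arguments_delta)
+#         elif event.type == "tool_calls.function.arguments.done":
+#             call_args = event.parsed_arguments  # parsed arguments object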
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_types.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_types.py
new file mode 100644
index 00000000..42552893
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/chat/_types.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from typing_extensions import TypeAlias
+
+from ....types.chat import ParsedChoice, ParsedChatCompletion, ParsedChatCompletionMessage
+
+ParsedChatCompletionSnapshot: TypeAlias = ParsedChatCompletion[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedChatCompletion` object.
+"""
+
+ParsedChatCompletionMessageSnapshot: TypeAlias = ParsedChatCompletionMessage[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedChatCompletionMessage` object.
+
+If the content has been fully accumulated, the `.parsed` content will be
+the `response_format` instance, otherwise it'll be the raw JSON parsed version.
+"""
+
+ParsedChoiceSnapshot: TypeAlias = ParsedChoice[object]
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/__init__.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/__init__.py
new file mode 100644
index 00000000..ff073633
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/__init__.py
@@ -0,0 +1,13 @@
+from ._events import (
+    ResponseTextDoneEvent as ResponseTextDoneEvent,
+    ResponseTextDeltaEvent as ResponseTextDeltaEvent,
+    ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
+)
+from ._responses import (
+    ResponseStream as ResponseStream,
+    AsyncResponseStream as AsyncResponseStream,
+    ResponseStreamEvent as ResponseStreamEvent,
+    ResponseStreamState as ResponseStreamState,
+    ResponseStreamManager as ResponseStreamManager,
+    AsyncResponseStreamManager as AsyncResponseStreamManager,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_events.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_events.py
new file mode 100644
index 00000000..fe17edf6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_events.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Union, Generic, TypeVar, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._compat import GenericModel
+from ....types.responses import (
+    ParsedResponse,
+    ResponseErrorEvent,
+    ResponseFailedEvent,
+    ResponseCreatedEvent,
+    ResponseTextDoneEvent as RawResponseTextDoneEvent,
+    ResponseAudioDoneEvent,
+    ResponseCompletedEvent as RawResponseCompletedEvent,
+    ResponseTextDeltaEvent as RawResponseTextDeltaEvent,
+    ResponseAudioDeltaEvent,
+    ResponseIncompleteEvent,
+    ResponseInProgressEvent,
+    ResponseRefusalDoneEvent,
+    ResponseRefusalDeltaEvent,
+    ResponseOutputItemDoneEvent,
+    ResponseContentPartDoneEvent,
+    ResponseOutputItemAddedEvent,
+    ResponseContentPartAddedEvent,
+    ResponseAudioTranscriptDoneEvent,
+    ResponseTextAnnotationDeltaEvent,
+    ResponseAudioTranscriptDeltaEvent,
+    ResponseWebSearchCallCompletedEvent,
+    ResponseWebSearchCallSearchingEvent,
+    ResponseFileSearchCallCompletedEvent,
+    ResponseFileSearchCallSearchingEvent,
+    ResponseWebSearchCallInProgressEvent,
+    ResponseFileSearchCallInProgressEvent,
+    ResponseFunctionCallArgumentsDoneEvent,
+    ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent,
+    ResponseCodeInterpreterCallCodeDoneEvent,
+    ResponseCodeInterpreterCallCodeDeltaEvent,
+    ResponseCodeInterpreterCallCompletedEvent,
+    ResponseCodeInterpreterCallInProgressEvent,
+    ResponseCodeInterpreterCallInterpretingEvent,
+)
+
+TextFormatT = TypeVar(
+    "TextFormatT",
+    # if it isn't given then we don't do any parsing
+    default=None,
+)
+
+
+class ResponseTextDeltaEvent(RawResponseTextDeltaEvent):
+    snapshot: str
+
+
+class ResponseTextDoneEvent(RawResponseTextDoneEvent, GenericModel, Generic[TextFormatT]):
+    parsed: Optional[TextFormatT] = None
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(RawResponseFunctionCallArgumentsDeltaEvent):
+    snapshot: str
+
+
+class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[TextFormatT]):
+    response: ParsedResponse[TextFormatT]  # type: ignore[assignment]
+
+
+ResponseStreamEvent: TypeAlias = Annotated[
+    Union[
+        # wrappers with snapshots added on
+        ResponseTextDeltaEvent,
+        ResponseTextDoneEvent[TextFormatT],
+        ResponseFunctionCallArgumentsDeltaEvent,
+        ResponseCompletedEvent[TextFormatT],
+        # the same as the non-accumulated API
+        ResponseAudioDeltaEvent,
+        ResponseAudioDoneEvent,
+        ResponseAudioTranscriptDeltaEvent,
+        ResponseAudioTranscriptDoneEvent,
+        ResponseCodeInterpreterCallCodeDeltaEvent,
+        ResponseCodeInterpreterCallCodeDoneEvent,
+        ResponseCodeInterpreterCallCompletedEvent,
+        ResponseCodeInterpreterCallInProgressEvent,
+        ResponseCodeInterpreterCallInterpretingEvent,
+        ResponseContentPartAddedEvent,
+        ResponseContentPartDoneEvent,
+        ResponseCreatedEvent,
+        ResponseErrorEvent,
+        ResponseFileSearchCallCompletedEvent,
+        ResponseFileSearchCallInProgressEvent,
+        ResponseFileSearchCallSearchingEvent,
+        ResponseFunctionCallArgumentsDoneEvent,
+        ResponseInProgressEvent,
+        ResponseFailedEvent,
+        ResponseIncompleteEvent,
+        ResponseOutputItemAddedEvent,
+        ResponseOutputItemDoneEvent,
+        ResponseRefusalDeltaEvent,
+        ResponseRefusalDoneEvent,
+        ResponseTextAnnotationDeltaEvent,
+        ResponseTextDoneEvent,
+        ResponseWebSearchCallCompletedEvent,
+        ResponseWebSearchCallInProgressEvent,
+        ResponseWebSearchCallSearchingEvent,
+    ],
+    PropertyInfo(discriminator="type"),
+]
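A minimal sketch of branching on this discriminated union; it assumes a configured `OpenAI` client and that `client.responses.stream(...)` yields these events, with the model and input as placeholders.

```python
from openai import OpenAI

client = OpenAI()

with client.responses.stream(
    model="gpt-4o",          # placeholder
    input="Write a haiku.",  # placeholder
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            # wrapper event: `delta` is the new text, `snapshot` the text accumulated so far
            print(event.delta, end="", flush=True)
        elif event.type == "response.output_text.done":
            print()  # `event.text` is the full text; `event.parsed` holds any parsed form
        # every other member of the union passes through unchanged from the raw API
```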
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_responses.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_responses.py
new file mode 100644
index 00000000..f8f4b641
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_responses.py
@@ -0,0 +1,354 @@
+from __future__ import annotations
+
+import inspect
+from types import TracebackType
+from typing import Any, List, Generic, Iterable, Awaitable, cast
+from typing_extensions import Self, Callable, Iterator, AsyncIterator
+
+from ._types import ParsedResponseSnapshot
+from ._events import (
+    ResponseStreamEvent,
+    ResponseTextDoneEvent,
+    ResponseCompletedEvent,
+    ResponseTextDeltaEvent,
+    ResponseFunctionCallArgumentsDeltaEvent,
+)
+from ...._types import NOT_GIVEN, NotGiven
+from ...._utils import is_given, consume_sync_iterator, consume_async_iterator
+from ...._models import build, construct_type_unchecked
+from ...._streaming import Stream, AsyncStream
+from ....types.responses import ParsedResponse, ResponseStreamEvent as RawResponseStreamEvent
+from ..._parsing._responses import TextFormatT, parse_text, parse_response
+from ....types.responses.tool_param import ToolParam
+from ....types.responses.parsed_response import (
+    ParsedContent,
+    ParsedResponseOutputMessage,
+    ParsedResponseFunctionToolCall,
+)
+
+
+class ResponseStream(Generic[TextFormatT]):
+    def __init__(
+        self,
+        *,
+        raw_stream: Stream[RawResponseStreamEvent],
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
+
+    def __next__(self) -> ResponseStreamEvent[TextFormatT]:
+        return self._iterator.__next__()
+
+    def __iter__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]:
+        for item in self._iterator:
+            yield item
+
+    def __enter__(self) -> Self:
+        return self
+
+    def __stream__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]:
+        for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_event(sse_event)
+            for event in events_to_fire:
+                yield event
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.close()
+
+    def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        self._response.close()
+
+    def get_final_response(self) -> ParsedResponse[TextFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedResponse` object.
+        """
+        self.until_done()
+        response = self._state._completed_response
+        if not response:
+            raise RuntimeError("Didn't receive a `response.completed` event.")
+
+        return response
+
+    def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        consume_sync_iterator(self)
+        return self
+
+
+class ResponseStreamManager(Generic[TextFormatT]):
+    def __init__(
+        self,
+        api_request: Callable[[], Stream[RawResponseStreamEvent]],
+        *,
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: ResponseStream[TextFormatT] | None = None
+        self.__api_request = api_request
+        self.__text_format = text_format
+        self.__input_tools = input_tools
+
+    def __enter__(self) -> ResponseStream[TextFormatT]:
+        raw_stream = self.__api_request()
+
+        self.__stream = ResponseStream(
+            raw_stream=raw_stream,
+            text_format=self.__text_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            self.__stream.close()
+
+
+class AsyncResponseStream(Generic[TextFormatT]):
+    def __init__(
+        self,
+        *,
+        raw_stream: AsyncStream[RawResponseStreamEvent],
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
+
+    async def __anext__(self) -> ResponseStreamEvent[TextFormatT]:
+        return await self._iterator.__anext__()
+
+    async def __aiter__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
+        async for item in self._iterator:
+            yield item
+
+    async def __stream__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
+        async for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_event(sse_event)
+            for event in events_to_fire:
+                yield event
+
+    async def __aenter__(self) -> Self:
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        await self.close()
+
+    async def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self._response.aclose()
+
+    async def get_final_response(self) -> ParsedResponse[TextFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedResponse` object.
+        """
+        await self.until_done()
+        response = self._state._completed_response
+        if not response:
+            raise RuntimeError("Didn't receive a `response.completed` event.")
+
+        return response
+
+    async def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        await consume_async_iterator(self)
+        return self
+
+
+class AsyncResponseStreamManager(Generic[TextFormatT]):
+    def __init__(
+        self,
+        api_request: Awaitable[AsyncStream[RawResponseStreamEvent]],
+        *,
+        text_format: type[TextFormatT] | NotGiven,
+        input_tools: Iterable[ToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: AsyncResponseStream[TextFormatT] | None = None
+        self.__api_request = api_request
+        self.__text_format = text_format
+        self.__input_tools = input_tools
+
+    async def __aenter__(self) -> AsyncResponseStream[TextFormatT]:
+        raw_stream = await self.__api_request
+
+        self.__stream = AsyncResponseStream(
+            raw_stream=raw_stream,
+            text_format=self.__text_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            await self.__stream.close()
+
+
+class ResponseStreamState(Generic[TextFormatT]):
+    def __init__(
+        self,
+        *,
+        input_tools: Iterable[ToolParam] | NotGiven,
+        text_format: type[TextFormatT] | NotGiven,
+    ) -> None:
+        self.__current_snapshot: ParsedResponseSnapshot | None = None
+        self._completed_response: ParsedResponse[TextFormatT] | None = None
+        self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else []
+        self._text_format = text_format
+        self._rich_text_format: type | NotGiven = text_format if inspect.isclass(text_format) else NOT_GIVEN
+
+    def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEvent[TextFormatT]]:
+        self.__current_snapshot = snapshot = self.accumulate_event(event)
+
+        events: List[ResponseStreamEvent[TextFormatT]] = []
+
+        if event.type == "response.output_text.delta":
+            output = snapshot.output[event.output_index]
+            assert output.type == "message"
+
+            content = output.content[event.content_index]
+            assert content.type == "output_text"
+
+            events.append(
+                build(
+                    ResponseTextDeltaEvent,
+                    content_index=event.content_index,
+                    delta=event.delta,
+                    item_id=event.item_id,
+                    output_index=event.output_index,
+                    type="response.output_text.delta",
+                    snapshot=content.text,
+                )
+            )
+        elif event.type == "response.output_text.done":
+            output = snapshot.output[event.output_index]
+            assert output.type == "message"
+
+            content = output.content[event.content_index]
+            assert content.type == "output_text"
+
+            events.append(
+                build(
+                    ResponseTextDoneEvent[TextFormatT],
+                    content_index=event.content_index,
+                    item_id=event.item_id,
+                    output_index=event.output_index,
+                    type="response.output_text.done",
+                    text=event.text,
+                    parsed=parse_text(event.text, text_format=self._text_format),
+                )
+            )
+        elif event.type == "response.function_call_arguments.delta":
+            output = snapshot.output[event.output_index]
+            assert output.type == "function_call"
+
+            events.append(
+                build(
+                    ResponseFunctionCallArgumentsDeltaEvent,
+                    delta=event.delta,
+                    item_id=event.item_id,
+                    output_index=event.output_index,
+                    type="response.function_call_arguments.delta",
+                    snapshot=output.arguments,
+                )
+            )
+
+        elif event.type == "response.completed":
+            response = self._completed_response
+            assert response is not None
+
+            events.append(
+                build(
+                    ResponseCompletedEvent,
+                    type="response.completed",
+                    response=response,
+                )
+            )
+        else:
+            events.append(event)
+
+        return events
+
+    def accumulate_event(self, event: RawResponseStreamEvent) -> ParsedResponseSnapshot:
+        snapshot = self.__current_snapshot
+        if snapshot is None:
+            return self._create_initial_response(event)
+
+        if event.type == "response.output_item.added":
+            if event.item.type == "function_call":
+                snapshot.output.append(
+                    construct_type_unchecked(
+                        type_=cast(Any, ParsedResponseFunctionToolCall), value=event.item.to_dict()
+                    )
+                )
+            elif event.item.type == "message":
+                snapshot.output.append(
+                    construct_type_unchecked(type_=cast(Any, ParsedResponseOutputMessage), value=event.item.to_dict())
+                )
+            else:
+                snapshot.output.append(event.item)
+        elif event.type == "response.content_part.added":
+            output = snapshot.output[event.output_index]
+            if output.type == "message":
+                output.content.append(
+                    construct_type_unchecked(type_=cast(Any, ParsedContent), value=event.part.to_dict())
+                )
+        elif event.type == "response.output_text.delta":
+            output = snapshot.output[event.output_index]
+            if output.type == "message":
+                content = output.content[event.content_index]
+                assert content.type == "output_text"
+                content.text += event.delta
+        elif event.type == "response.function_call_arguments.delta":
+            output = snapshot.output[event.output_index]
+            if output.type == "function_call":
+                output.arguments += event.delta
+        elif event.type == "response.completed":
+            self._completed_response = parse_response(
+                text_format=self._text_format,
+                response=event.response,
+                input_tools=self._input_tools,
+            )
+
+        return snapshot
+
+    def _create_initial_response(self, event: RawResponseStreamEvent) -> ParsedResponseSnapshot:
+        if event.type != "response.created":
+            raise RuntimeError(f"Expected to have received `response.created` before `{event.type}`")
+
+        return construct_type_unchecked(type_=ParsedResponseSnapshot, value=event.response.to_dict())
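To show the manager/stream flow implemented above end to end, a hedged sketch assuming `client.responses.stream(...)` wires up `ResponseStreamManager` and accepts a Pydantic `text_format`; the schema and model name are placeholders.

```python
from pydantic import BaseModel

from openai import OpenAI


class Haiku(BaseModel):  # hypothetical output schema
    title: str
    lines: list[str]


client = OpenAI()

with client.responses.stream(
    model="gpt-4o",  # placeholder
    input="Compose a haiku about autumn.",
    text_format=Haiku,
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)

    # Consumes the rest of the stream, then returns the accumulated ParsedResponse
    # built when the `response.completed` event was handled; parsed content is
    # attached to the output text parts.
    final = stream.get_final_response()
    print("\n", final.id)
```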
diff --git a/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_types.py b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_types.py
new file mode 100644
index 00000000..6d3fd90e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/lib/streaming/responses/_types.py
@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from typing_extensions import TypeAlias
+
+from ....types.responses import ParsedResponse
+
+ParsedResponseSnapshot: TypeAlias = ParsedResponse[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedResponse` object.
+"""
diff --git a/.venv/lib/python3.12/site-packages/openai/pagination.py b/.venv/lib/python3.12/site-packages/openai/pagination.py
new file mode 100644
index 00000000..a59cced8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/pagination.py
@@ -0,0 +1,125 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Any, List, Generic, TypeVar, Optional, cast
+from typing_extensions import Protocol, override, runtime_checkable
+
+from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage
+
+__all__ = ["SyncPage", "AsyncPage", "SyncCursorPage", "AsyncCursorPage"]
+
+_T = TypeVar("_T")
+
+
+@runtime_checkable
+class CursorPageItem(Protocol):
+    id: Optional[str]
+
+
+class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
+    """Note: no pagination actually occurs yet, this is for forwards-compatibility."""
+
+    data: List[_T]
+    object: str
+
+    @override
+    def _get_page_items(self) -> List[_T]:
+        data = self.data
+        if not data:
+            return []
+        return data
+
+    @override
+    def next_page_info(self) -> None:
+        """
+        This page represents a response that isn't actually paginated at the API level
+        so there will never be a next page.
+        """
+        return None
+
+
+class AsyncPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
+    """Note: no pagination actually occurs yet, this is for forwards-compatibility."""
+
+    data: List[_T]
+    object: str
+
+    @override
+    def _get_page_items(self) -> List[_T]:
+        data = self.data
+        if not data:
+            return []
+        return data
+
+    @override
+    def next_page_info(self) -> None:
+        """
+        This page represents a response that isn't actually paginated at the API level
+        so there will never be a next page.
+        """
+        return None
+
+
+class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
+    data: List[_T]
+    has_more: Optional[bool] = None
+
+    @override
+    def _get_page_items(self) -> List[_T]:
+        data = self.data
+        if not data:
+            return []
+        return data
+
+    @override
+    def has_next_page(self) -> bool:
+        has_more = self.has_more
+        if has_more is not None and has_more is False:
+            return False
+
+        return super().has_next_page()
+
+    @override
+    def next_page_info(self) -> Optional[PageInfo]:
+        data = self.data
+        if not data:
+            return None
+
+        item = cast(Any, data[-1])
+        if not isinstance(item, CursorPageItem) or item.id is None:
+            # TODO emit warning log
+            return None
+
+        return PageInfo(params={"after": item.id})
+
+
+class AsyncCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
+    data: List[_T]
+    has_more: Optional[bool] = None
+
+    @override
+    def _get_page_items(self) -> List[_T]:
+        data = self.data
+        if not data:
+            return []
+        return data
+
+    @override
+    def has_next_page(self) -> bool:
+        has_more = self.has_more
+        if has_more is not None and has_more is False:
+            return False
+
+        return super().has_next_page()
+
+    @override
+    def next_page_info(self) -> Optional[PageInfo]:
+        data = self.data
+        if not data:
+            return None
+
+        item = cast(Any, data[-1])
+        if not isinstance(item, CursorPageItem) or item.id is None:
+            # TODO emit warning log
+            return None
+
+        return PageInfo(params={"after": item.id})
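A minimal sketch of how these cursor pages drive pagination; it assumes a configured `OpenAI` client and that `client.files.list()` returns a `SyncCursorPage`. Iterating the page object lets the base-page machinery call `has_next_page()` / `next_page_info()` and request the following page with `after=<last item id>`.

```python
from openai import OpenAI

client = OpenAI()

page = client.files.list(limit=20)  # assumed to be a SyncCursorPage
for f in page:  # transparently walks every page
    print(f.id)

# Or page by page, using the hooks defined above:
page = client.files.list(limit=20)
while True:
    for f in page.data:
        print(f.id)
    if not page.has_next_page():
        break
    page = page.get_next_page()  # issues the request described by next_page_info()
```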
diff --git a/.venv/lib/python3.12/site-packages/openai/py.typed b/.venv/lib/python3.12/site-packages/openai/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/py.typed
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/__init__.py
new file mode 100644
index 00000000..d3457cf3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/__init__.py
@@ -0,0 +1,201 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .beta import (
+    Beta,
+    AsyncBeta,
+    BetaWithRawResponse,
+    AsyncBetaWithRawResponse,
+    BetaWithStreamingResponse,
+    AsyncBetaWithStreamingResponse,
+)
+from .chat import (
+    Chat,
+    AsyncChat,
+    ChatWithRawResponse,
+    AsyncChatWithRawResponse,
+    ChatWithStreamingResponse,
+    AsyncChatWithStreamingResponse,
+)
+from .audio import (
+    Audio,
+    AsyncAudio,
+    AudioWithRawResponse,
+    AsyncAudioWithRawResponse,
+    AudioWithStreamingResponse,
+    AsyncAudioWithStreamingResponse,
+)
+from .files import (
+    Files,
+    AsyncFiles,
+    FilesWithRawResponse,
+    AsyncFilesWithRawResponse,
+    FilesWithStreamingResponse,
+    AsyncFilesWithStreamingResponse,
+)
+from .images import (
+    Images,
+    AsyncImages,
+    ImagesWithRawResponse,
+    AsyncImagesWithRawResponse,
+    ImagesWithStreamingResponse,
+    AsyncImagesWithStreamingResponse,
+)
+from .models import (
+    Models,
+    AsyncModels,
+    ModelsWithRawResponse,
+    AsyncModelsWithRawResponse,
+    ModelsWithStreamingResponse,
+    AsyncModelsWithStreamingResponse,
+)
+from .batches import (
+    Batches,
+    AsyncBatches,
+    BatchesWithRawResponse,
+    AsyncBatchesWithRawResponse,
+    BatchesWithStreamingResponse,
+    AsyncBatchesWithStreamingResponse,
+)
+from .uploads import (
+    Uploads,
+    AsyncUploads,
+    UploadsWithRawResponse,
+    AsyncUploadsWithRawResponse,
+    UploadsWithStreamingResponse,
+    AsyncUploadsWithStreamingResponse,
+)
+from .responses import (
+    Responses,
+    AsyncResponses,
+    ResponsesWithRawResponse,
+    AsyncResponsesWithRawResponse,
+    ResponsesWithStreamingResponse,
+    AsyncResponsesWithStreamingResponse,
+)
+from .embeddings import (
+    Embeddings,
+    AsyncEmbeddings,
+    EmbeddingsWithRawResponse,
+    AsyncEmbeddingsWithRawResponse,
+    EmbeddingsWithStreamingResponse,
+    AsyncEmbeddingsWithStreamingResponse,
+)
+from .completions import (
+    Completions,
+    AsyncCompletions,
+    CompletionsWithRawResponse,
+    AsyncCompletionsWithRawResponse,
+    CompletionsWithStreamingResponse,
+    AsyncCompletionsWithStreamingResponse,
+)
+from .fine_tuning import (
+    FineTuning,
+    AsyncFineTuning,
+    FineTuningWithRawResponse,
+    AsyncFineTuningWithRawResponse,
+    FineTuningWithStreamingResponse,
+    AsyncFineTuningWithStreamingResponse,
+)
+from .moderations import (
+    Moderations,
+    AsyncModerations,
+    ModerationsWithRawResponse,
+    AsyncModerationsWithRawResponse,
+    ModerationsWithStreamingResponse,
+    AsyncModerationsWithStreamingResponse,
+)
+from .vector_stores import (
+    VectorStores,
+    AsyncVectorStores,
+    VectorStoresWithRawResponse,
+    AsyncVectorStoresWithRawResponse,
+    VectorStoresWithStreamingResponse,
+    AsyncVectorStoresWithStreamingResponse,
+)
+
+__all__ = [
+    "Completions",
+    "AsyncCompletions",
+    "CompletionsWithRawResponse",
+    "AsyncCompletionsWithRawResponse",
+    "CompletionsWithStreamingResponse",
+    "AsyncCompletionsWithStreamingResponse",
+    "Chat",
+    "AsyncChat",
+    "ChatWithRawResponse",
+    "AsyncChatWithRawResponse",
+    "ChatWithStreamingResponse",
+    "AsyncChatWithStreamingResponse",
+    "Embeddings",
+    "AsyncEmbeddings",
+    "EmbeddingsWithRawResponse",
+    "AsyncEmbeddingsWithRawResponse",
+    "EmbeddingsWithStreamingResponse",
+    "AsyncEmbeddingsWithStreamingResponse",
+    "Files",
+    "AsyncFiles",
+    "FilesWithRawResponse",
+    "AsyncFilesWithRawResponse",
+    "FilesWithStreamingResponse",
+    "AsyncFilesWithStreamingResponse",
+    "Images",
+    "AsyncImages",
+    "ImagesWithRawResponse",
+    "AsyncImagesWithRawResponse",
+    "ImagesWithStreamingResponse",
+    "AsyncImagesWithStreamingResponse",
+    "Audio",
+    "AsyncAudio",
+    "AudioWithRawResponse",
+    "AsyncAudioWithRawResponse",
+    "AudioWithStreamingResponse",
+    "AsyncAudioWithStreamingResponse",
+    "Moderations",
+    "AsyncModerations",
+    "ModerationsWithRawResponse",
+    "AsyncModerationsWithRawResponse",
+    "ModerationsWithStreamingResponse",
+    "AsyncModerationsWithStreamingResponse",
+    "Models",
+    "AsyncModels",
+    "ModelsWithRawResponse",
+    "AsyncModelsWithRawResponse",
+    "ModelsWithStreamingResponse",
+    "AsyncModelsWithStreamingResponse",
+    "FineTuning",
+    "AsyncFineTuning",
+    "FineTuningWithRawResponse",
+    "AsyncFineTuningWithRawResponse",
+    "FineTuningWithStreamingResponse",
+    "AsyncFineTuningWithStreamingResponse",
+    "VectorStores",
+    "AsyncVectorStores",
+    "VectorStoresWithRawResponse",
+    "AsyncVectorStoresWithRawResponse",
+    "VectorStoresWithStreamingResponse",
+    "AsyncVectorStoresWithStreamingResponse",
+    "Beta",
+    "AsyncBeta",
+    "BetaWithRawResponse",
+    "AsyncBetaWithRawResponse",
+    "BetaWithStreamingResponse",
+    "AsyncBetaWithStreamingResponse",
+    "Batches",
+    "AsyncBatches",
+    "BatchesWithRawResponse",
+    "AsyncBatchesWithRawResponse",
+    "BatchesWithStreamingResponse",
+    "AsyncBatchesWithStreamingResponse",
+    "Uploads",
+    "AsyncUploads",
+    "UploadsWithRawResponse",
+    "AsyncUploadsWithRawResponse",
+    "UploadsWithStreamingResponse",
+    "AsyncUploadsWithStreamingResponse",
+    "Responses",
+    "AsyncResponses",
+    "ResponsesWithRawResponse",
+    "AsyncResponsesWithRawResponse",
+    "ResponsesWithStreamingResponse",
+    "AsyncResponsesWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/audio/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/audio/__init__.py
new file mode 100644
index 00000000..7da1d2db
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/audio/__init__.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .audio import (
+    Audio,
+    AsyncAudio,
+    AudioWithRawResponse,
+    AsyncAudioWithRawResponse,
+    AudioWithStreamingResponse,
+    AsyncAudioWithStreamingResponse,
+)
+from .speech import (
+    Speech,
+    AsyncSpeech,
+    SpeechWithRawResponse,
+    AsyncSpeechWithRawResponse,
+    SpeechWithStreamingResponse,
+    AsyncSpeechWithStreamingResponse,
+)
+from .translations import (
+    Translations,
+    AsyncTranslations,
+    TranslationsWithRawResponse,
+    AsyncTranslationsWithRawResponse,
+    TranslationsWithStreamingResponse,
+    AsyncTranslationsWithStreamingResponse,
+)
+from .transcriptions import (
+    Transcriptions,
+    AsyncTranscriptions,
+    TranscriptionsWithRawResponse,
+    AsyncTranscriptionsWithRawResponse,
+    TranscriptionsWithStreamingResponse,
+    AsyncTranscriptionsWithStreamingResponse,
+)
+
+__all__ = [
+    "Transcriptions",
+    "AsyncTranscriptions",
+    "TranscriptionsWithRawResponse",
+    "AsyncTranscriptionsWithRawResponse",
+    "TranscriptionsWithStreamingResponse",
+    "AsyncTranscriptionsWithStreamingResponse",
+    "Translations",
+    "AsyncTranslations",
+    "TranslationsWithRawResponse",
+    "AsyncTranslationsWithRawResponse",
+    "TranslationsWithStreamingResponse",
+    "AsyncTranslationsWithStreamingResponse",
+    "Speech",
+    "AsyncSpeech",
+    "SpeechWithRawResponse",
+    "AsyncSpeechWithRawResponse",
+    "SpeechWithStreamingResponse",
+    "AsyncSpeechWithStreamingResponse",
+    "Audio",
+    "AsyncAudio",
+    "AudioWithRawResponse",
+    "AsyncAudioWithRawResponse",
+    "AudioWithStreamingResponse",
+    "AsyncAudioWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/audio/audio.py b/.venv/lib/python3.12/site-packages/openai/resources/audio/audio.py
new file mode 100644
index 00000000..383b7073
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/audio/audio.py
@@ -0,0 +1,166 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .speech import (
+    Speech,
+    AsyncSpeech,
+    SpeechWithRawResponse,
+    AsyncSpeechWithRawResponse,
+    SpeechWithStreamingResponse,
+    AsyncSpeechWithStreamingResponse,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .translations import (
+    Translations,
+    AsyncTranslations,
+    TranslationsWithRawResponse,
+    AsyncTranslationsWithRawResponse,
+    TranslationsWithStreamingResponse,
+    AsyncTranslationsWithStreamingResponse,
+)
+from .transcriptions import (
+    Transcriptions,
+    AsyncTranscriptions,
+    TranscriptionsWithRawResponse,
+    AsyncTranscriptionsWithRawResponse,
+    TranscriptionsWithStreamingResponse,
+    AsyncTranscriptionsWithStreamingResponse,
+)
+
+__all__ = ["Audio", "AsyncAudio"]
+
+
+class Audio(SyncAPIResource):
+    @cached_property
+    def transcriptions(self) -> Transcriptions:
+        return Transcriptions(self._client)
+
+    @cached_property
+    def translations(self) -> Translations:
+        return Translations(self._client)
+
+    @cached_property
+    def speech(self) -> Speech:
+        return Speech(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AudioWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AudioWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AudioWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AudioWithStreamingResponse(self)
+
+
+class AsyncAudio(AsyncAPIResource):
+    @cached_property
+    def transcriptions(self) -> AsyncTranscriptions:
+        return AsyncTranscriptions(self._client)
+
+    @cached_property
+    def translations(self) -> AsyncTranslations:
+        return AsyncTranslations(self._client)
+
+    @cached_property
+    def speech(self) -> AsyncSpeech:
+        return AsyncSpeech(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncAudioWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncAudioWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncAudioWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncAudioWithStreamingResponse(self)
+
+
+class AudioWithRawResponse:
+    def __init__(self, audio: Audio) -> None:
+        self._audio = audio
+
+    @cached_property
+    def transcriptions(self) -> TranscriptionsWithRawResponse:
+        return TranscriptionsWithRawResponse(self._audio.transcriptions)
+
+    @cached_property
+    def translations(self) -> TranslationsWithRawResponse:
+        return TranslationsWithRawResponse(self._audio.translations)
+
+    @cached_property
+    def speech(self) -> SpeechWithRawResponse:
+        return SpeechWithRawResponse(self._audio.speech)
+
+
+class AsyncAudioWithRawResponse:
+    def __init__(self, audio: AsyncAudio) -> None:
+        self._audio = audio
+
+    @cached_property
+    def transcriptions(self) -> AsyncTranscriptionsWithRawResponse:
+        return AsyncTranscriptionsWithRawResponse(self._audio.transcriptions)
+
+    @cached_property
+    def translations(self) -> AsyncTranslationsWithRawResponse:
+        return AsyncTranslationsWithRawResponse(self._audio.translations)
+
+    @cached_property
+    def speech(self) -> AsyncSpeechWithRawResponse:
+        return AsyncSpeechWithRawResponse(self._audio.speech)
+
+
+class AudioWithStreamingResponse:
+    def __init__(self, audio: Audio) -> None:
+        self._audio = audio
+
+    @cached_property
+    def transcriptions(self) -> TranscriptionsWithStreamingResponse:
+        return TranscriptionsWithStreamingResponse(self._audio.transcriptions)
+
+    @cached_property
+    def translations(self) -> TranslationsWithStreamingResponse:
+        return TranslationsWithStreamingResponse(self._audio.translations)
+
+    @cached_property
+    def speech(self) -> SpeechWithStreamingResponse:
+        return SpeechWithStreamingResponse(self._audio.speech)
+
+
+class AsyncAudioWithStreamingResponse:
+    def __init__(self, audio: AsyncAudio) -> None:
+        self._audio = audio
+
+    @cached_property
+    def transcriptions(self) -> AsyncTranscriptionsWithStreamingResponse:
+        return AsyncTranscriptionsWithStreamingResponse(self._audio.transcriptions)
+
+    @cached_property
+    def translations(self) -> AsyncTranslationsWithStreamingResponse:
+        return AsyncTranslationsWithStreamingResponse(self._audio.translations)
+
+    @cached_property
+    def speech(self) -> AsyncSpeechWithStreamingResponse:
+        return AsyncSpeechWithStreamingResponse(self._audio.speech)
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/audio/speech.py b/.venv/lib/python3.12/site-packages/openai/resources/audio/speech.py
new file mode 100644
index 00000000..529e3a47
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/audio/speech.py
@@ -0,0 +1,244 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+    StreamedBinaryAPIResponse,
+    AsyncStreamedBinaryAPIResponse,
+    to_custom_streamed_response_wrapper,
+    async_to_custom_streamed_response_wrapper,
+)
+from ...types.audio import speech_create_params
+from ..._base_client import make_request_options
+from ...types.audio.speech_model import SpeechModel
+
+__all__ = ["Speech", "AsyncSpeech"]
+
+
+class Speech(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> SpeechWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return SpeechWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> SpeechWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return SpeechWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        input: str,
+        model: Union[str, SpeechModel],
+        voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
+        instructions: str | NotGiven = NOT_GIVEN,
+        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
+        speed: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> _legacy_response.HttpxBinaryResponseContent:
+        """
+        Generates audio from the input text.
+
+        Args:
+          input: The text to generate audio for. The maximum length is 4096 characters.
+
+          model:
+              One of the available [TTS models](https://platform.openai.com/docs/models#tts):
+              `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+
+          voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
+              `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
+              voices are available in the
+              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+
+          instructions: Control the voice of your generated audio with additional instructions. Does not
+              work with `tts-1` or `tts-1-hd`.
+
+          response_format: The format to return the audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
+              `wav`, and `pcm`.
+
+          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
+              the default.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
+        return self._post(
+            "/audio/speech",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "voice": voice,
+                    "instructions": instructions,
+                    "response_format": response_format,
+                    "speed": speed,
+                },
+                speech_create_params.SpeechCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
+        )
+
+
+class AsyncSpeech(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncSpeechWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncSpeechWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncSpeechWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncSpeechWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        input: str,
+        model: Union[str, SpeechModel],
+        voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
+        instructions: str | NotGiven = NOT_GIVEN,
+        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
+        speed: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> _legacy_response.HttpxBinaryResponseContent:
+        """
+        Generates audio from the input text.
+
+        Args:
+          input: The text to generate audio for. The maximum length is 4096 characters.
+
+          model:
+              One of the available [TTS models](https://platform.openai.com/docs/models#tts):
+              `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+
+          voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
+              `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
+              voices are available in the
+              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+
+          instructions: Control the voice of your generated audio with additional instructions. Does not
+              work with `tts-1` or `tts-1-hd`.
+
+          response_format: The format to return the audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
+              `wav`, and `pcm`.
+
+          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
+              the default.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
+        return await self._post(
+            "/audio/speech",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "voice": voice,
+                    "instructions": instructions,
+                    "response_format": response_format,
+                    "speed": speed,
+                },
+                speech_create_params.SpeechCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
+        )
+
+
+class SpeechWithRawResponse:
+    def __init__(self, speech: Speech) -> None:
+        self._speech = speech
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            speech.create,
+        )
+
+
+class AsyncSpeechWithRawResponse:
+    def __init__(self, speech: AsyncSpeech) -> None:
+        self._speech = speech
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            speech.create,
+        )
+
+
+class SpeechWithStreamingResponse:
+    def __init__(self, speech: Speech) -> None:
+        self._speech = speech
+
+        self.create = to_custom_streamed_response_wrapper(
+            speech.create,
+            StreamedBinaryAPIResponse,
+        )
+
+
+class AsyncSpeechWithStreamingResponse:
+    def __init__(self, speech: AsyncSpeech) -> None:
+        self._speech = speech
+
+        self.create = async_to_custom_streamed_response_wrapper(
+            speech.create,
+            AsyncStreamedBinaryAPIResponse,
+        )
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/audio/transcriptions.py b/.venv/lib/python3.12/site-packages/openai/resources/audio/transcriptions.py
new file mode 100644
index 00000000..2a77f91d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/audio/transcriptions.py
@@ -0,0 +1,682 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import logging
+from typing import TYPE_CHECKING, List, Union, Mapping, Optional, cast
+from typing_extensions import Literal, overload, assert_never
+
+import httpx
+
+from ... import _legacy_response
+from ...types import AudioResponseFormat
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
+    extract_files,
+    required_args,
+    maybe_transform,
+    deepcopy_minimal,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..._streaming import Stream, AsyncStream
+from ...types.audio import transcription_create_params
+from ..._base_client import make_request_options
+from ...types.audio_model import AudioModel
+from ...types.audio.transcription import Transcription
+from ...types.audio_response_format import AudioResponseFormat
+from ...types.audio.transcription_include import TranscriptionInclude
+from ...types.audio.transcription_verbose import TranscriptionVerbose
+from ...types.audio.transcription_stream_event import TranscriptionStreamEvent
+from ...types.audio.transcription_create_response import TranscriptionCreateResponse
+
+__all__ = ["Transcriptions", "AsyncTranscriptions"]
+
+log: logging.Logger = logging.getLogger("openai.audio.transcriptions")
+
+
+class Transcriptions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> TranscriptionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return TranscriptionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> TranscriptionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return TranscriptionsWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Transcription: ...
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        response_format: Literal["verbose_json"],
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranscriptionVerbose: ...
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Literal["text", "srt", "vtt"],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> str: ...
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        stream: Literal[True],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[TranscriptionStreamEvent]:
+        """
+        Transcribes audio into the input language.
+
+        Args:
+          file:
+              The audio file object (not file name) to transcribe, in one of these formats:
+              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+
+          model: ID of the model to use. The options are `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+              Whisper V2 model).
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+              for more information.
+
+              Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+
+          include: Additional information to include in the transcription response. `logprobs` will
+              return the log probabilities of the tokens in the response to understand the
+              model's confidence in the transcription. `logprobs` only works with
+              response_format set to `json` and only with the models `gpt-4o-transcribe` and
+              `gpt-4o-mini-transcribe`.
+
+          language: The language of the input audio. Supplying the input language in
+              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+              format will improve accuracy and latency.
+
+          prompt: An optional text to guide the model's style or continue a previous audio
+              segment. The
+              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+              should match the audio language.
+
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+              the only supported format is `json`.
+
+          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+              output more random, while lower values like 0.2 will make it more focused and
+              deterministic. If set to 0, the model will use
+              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+              automatically increase the temperature until certain thresholds are hit.
+
+          timestamp_granularities: The timestamp granularities to populate for this transcription.
+              `response_format` must be set to `verbose_json` to use timestamp granularities.
+              Either or both of these options are supported: `word` or `segment`. Note: There
+              is no additional latency for segment timestamps, but generating word timestamps
+              incurs additional latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        stream: bool,
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranscriptionCreateResponse | Stream[TranscriptionStreamEvent]:
+        """
+        Transcribes audio into the input language.
+
+        Args:
+          file:
+              The audio file object (not file name) to transcribe, in one of these formats:
+              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+
+          model: ID of the model to use. The options are `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+              Whisper V2 model).
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+              for more information.
+
+              Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+
+          include: Additional information to include in the transcription response. `logprobs` will
+              return the log probabilities of the tokens in the response to understand the
+              model's confidence in the transcription. `logprobs` only works with
+              response_format set to `json` and only with the models `gpt-4o-transcribe` and
+              `gpt-4o-mini-transcribe`.
+
+          language: The language of the input audio. Supplying the input language in
+              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+              format will improve accuracy and latency.
+
+          prompt: An optional text to guide the model's style or continue a previous audio
+              segment. The
+              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+              should match the audio language.
+
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+              the only supported format is `json`.
+
+          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+              output more random, while lower values like 0.2 will make it more focused and
+              deterministic. If set to 0, the model will use
+              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+              automatically increase the temperature until certain thresholds are hit.
+
+          timestamp_granularities: The timestamp granularities to populate for this transcription.
+              `response_format` must be set to `verbose_json` to use timestamp granularities.
+              Either or both of these options are supported: `word` or `segment`. Note: There
+              is no additional latency for segment timestamps, but generating word timestamps
+              incurs additional latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["file", "model"], ["file", "model", "stream"])
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> str | Transcription | TranscriptionVerbose | Stream[TranscriptionStreamEvent]:
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "model": model,
+                "include": include,
+                "language": language,
+                "prompt": prompt,
+                "response_format": response_format,
+                "stream": stream,
+                "temperature": temperature,
+                "timestamp_granularities": timestamp_granularities,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(  # type: ignore[return-value]
+            "/audio/transcriptions",
+            body=maybe_transform(body, transcription_create_params.TranscriptionCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_get_response_format_type(response_format),
+            stream=stream or False,
+            stream_cls=Stream[TranscriptionStreamEvent],
+        )
+
+
+class AsyncTranscriptions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncTranscriptionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncTranscriptionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncTranscriptionsWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Transcription: ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        response_format: Literal["verbose_json"],
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranscriptionVerbose: ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        response_format: Literal["text", "srt", "vtt"],
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> str: ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        stream: Literal[True],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[TranscriptionStreamEvent]:
+        """
+        Transcribes audio into the input language.
+
+        Args:
+          file:
+              The audio file object (not file name) to transcribe, in one of these formats:
+              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+
+          model: ID of the model to use. The options are `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+              Whisper V2 model).
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+              for more information.
+
+              Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+
+          include: Additional information to include in the transcription response. `logprobs` will
+              return the log probabilities of the tokens in the response to understand the
+              model's confidence in the transcription. `logprobs` only works with
+              response_format set to `json` and only with the models `gpt-4o-transcribe` and
+              `gpt-4o-mini-transcribe`.
+
+          language: The language of the input audio. Supplying the input language in
+              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+              format will improve accuracy and latency.
+
+          prompt: An optional text to guide the model's style or continue a previous audio
+              segment. The
+              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+              should match the audio language.
+
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+              the only supported format is `json`.
+
+          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+              output more random, while lower values like 0.2 will make it more focused and
+              deterministic. If set to 0, the model will use
+              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+              automatically increase the temperature until certain thresholds are hit.
+
+          timestamp_granularities: The timestamp granularities to populate for this transcription.
+              `response_format` must be set to `verbose_json` to use timestamp granularities.
+              Either or both of these options are supported: `word` or `segment`. Note: There
+              is no additional latency for segment timestamps, but generating word timestamps
+              incurs additional latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        stream: bool,
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranscriptionCreateResponse | AsyncStream[TranscriptionStreamEvent]:
+        """
+        Transcribes audio into the input language.
+
+        Args:
+          file:
+              The audio file object (not file name) to transcribe, in one of these formats:
+              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+
+          model: ID of the model to use. The options are `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+              Whisper V2 model).
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+              for more information.
+
+              Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+
+          include: Additional information to include in the transcription response. `logprobs` will
+              return the log probabilities of the tokens in the response to understand the
+              model's confidence in the transcription. `logprobs` only works with
+              response_format set to `json` and only with the models `gpt-4o-transcribe` and
+              `gpt-4o-mini-transcribe`.
+
+          language: The language of the input audio. Supplying the input language in
+              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+              format will improve accuracy and latency.
+
+          prompt: An optional text to guide the model's style or continue a previous audio
+              segment. The
+              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+              should match the audio language.
+
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+              the only supported format is `json`.
+
+          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+              output more random, while lower values like 0.2 will make it more focused and
+              deterministic. If set to 0, the model will use
+              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+              automatically increase the temperature until certain thresholds are hit.
+
+          timestamp_granularities: The timestamp granularities to populate for this transcription.
+              `response_format` must be set to `verbose_json` to use timestamp granularities.
+              Either or both of these options are supported: `word` or `segment`. Note: There
+              is no additional latency for segment timestamps, but generating word timestamps
+              incurs additional latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["file", "model"], ["file", "model", "stream"])
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
+        language: str | NotGiven = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Transcription | TranscriptionVerbose | str | AsyncStream[TranscriptionStreamEvent]:
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "model": model,
+                "include": include,
+                "language": language,
+                "prompt": prompt,
+                "response_format": response_format,
+                "stream": stream,
+                "temperature": temperature,
+                "timestamp_granularities": timestamp_granularities,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            "/audio/transcriptions",
+            body=await async_maybe_transform(body, transcription_create_params.TranscriptionCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_get_response_format_type(response_format),
+            stream=stream or False,
+            stream_cls=AsyncStream[TranscriptionStreamEvent],
+        )
+
+
+class TranscriptionsWithRawResponse:
+    def __init__(self, transcriptions: Transcriptions) -> None:
+        self._transcriptions = transcriptions
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            transcriptions.create,
+        )
+
+
+class AsyncTranscriptionsWithRawResponse:
+    def __init__(self, transcriptions: AsyncTranscriptions) -> None:
+        self._transcriptions = transcriptions
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            transcriptions.create,
+        )
+
+
+class TranscriptionsWithStreamingResponse:
+    def __init__(self, transcriptions: Transcriptions) -> None:
+        self._transcriptions = transcriptions
+
+        self.create = to_streamed_response_wrapper(
+            transcriptions.create,
+        )
+
+
+class AsyncTranscriptionsWithStreamingResponse:
+    def __init__(self, transcriptions: AsyncTranscriptions) -> None:
+        self._transcriptions = transcriptions
+
+        self.create = async_to_streamed_response_wrapper(
+            transcriptions.create,
+        )
+
+
+def _get_response_format_type(
+    response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven,
+) -> type[Transcription | TranscriptionVerbose | str]:
+    if isinstance(response_format, NotGiven) or response_format is None:  # pyright: ignore[reportUnnecessaryComparison]
+        return Transcription
+
+    if response_format == "json":
+        return Transcription
+    elif response_format == "verbose_json":
+        return TranscriptionVerbose
+    elif response_format == "srt" or response_format == "text" or response_format == "vtt":
+        return str
+    elif TYPE_CHECKING:  # type: ignore[unreachable]
+        assert_never(response_format)
+    else:
+        log.warning("Unexpected audio response format: %s", response_format)
+        return Transcription
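A minimal usage sketch for the Transcriptions resource added above (illustrative, not part of the vendored file); it assumes an OpenAI client configured through the OPENAI_API_KEY environment variable and a local file named speech.mp3:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    with open("speech.mp3", "rb") as f:
        # Default `json` response_format returns a Transcription object.
        transcription = client.audio.transcriptions.create(model="whisper-1", file=f)
    print(transcription.text)

    with open("speech.mp3", "rb") as f:
        # `verbose_json` plus timestamp_granularities returns a TranscriptionVerbose with word timings.
        verbose = client.audio.transcriptions.create(
            model="whisper-1",
            file=f,
            response_format="verbose_json",
            timestamp_granularities=["word"],
        )
    print(verbose.words)

Passing stream=True (supported for the gpt-4o transcribe models, not whisper-1) instead returns a Stream of TranscriptionStreamEvent objects, matching the streaming overloads above.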
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/audio/translations.py b/.venv/lib/python3.12/site-packages/openai/resources/audio/translations.py
new file mode 100644
index 00000000..f55dbd0e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/audio/translations.py
@@ -0,0 +1,372 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import logging
+from typing import TYPE_CHECKING, Union, Mapping, cast
+from typing_extensions import Literal, overload, assert_never
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
+    extract_files,
+    maybe_transform,
+    deepcopy_minimal,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...types.audio import translation_create_params
+from ..._base_client import make_request_options
+from ...types.audio_model import AudioModel
+from ...types.audio.translation import Translation
+from ...types.audio_response_format import AudioResponseFormat
+from ...types.audio.translation_verbose import TranslationVerbose
+
+__all__ = ["Translations", "AsyncTranslations"]
+
+log: logging.Logger = logging.getLogger("openai.audio.translations")
+
+
+class Translations(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> TranslationsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return TranslationsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> TranslationsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return TranslationsWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Translation: ...
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Literal["verbose_json"],
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranslationVerbose: ...
+
+    @overload
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Literal["text", "srt", "vtt"],
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> str: ...
+
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[Literal["json", "text", "srt", "verbose_json", "vtt"], NotGiven] = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Translation | TranslationVerbose | str:
+        """
+        Translates audio into English.
+
+        Args:
+          file: The audio file object (not file name) to translate, in one of these formats: flac,
+              mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+
+          model: ID of the model to use. Only `whisper-1` (which is powered by our open source
+              Whisper V2 model) is currently available.
+
+          prompt: An optional text to guide the model's style or continue a previous audio
+              segment. The
+              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+              should be in English.
+
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`.
+
+          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+              output more random, while lower values like 0.2 will make it more focused and
+              deterministic. If set to 0, the model will use
+              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+              automatically increase the temperature until certain thresholds are hit.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "model": model,
+                "prompt": prompt,
+                "response_format": response_format,
+                "temperature": temperature,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(  # type: ignore[return-value]
+            "/audio/translations",
+            body=maybe_transform(body, translation_create_params.TranslationCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_get_response_format_type(response_format),
+        )
+
+
+class AsyncTranslations(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncTranslationsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncTranslationsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncTranslationsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncTranslationsWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Translation: ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Literal["verbose_json"],
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranslationVerbose: ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        response_format: Literal["text", "srt", "vtt"],
+        prompt: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> str: ...
+
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: Union[str, AudioModel],
+        prompt: str | NotGiven = NOT_GIVEN,
+        response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Translation | TranslationVerbose | str:
+        """
+        Translates audio into English.
+
+        Args:
+          file: The audio file object (not file name) to translate, in one of these formats: flac,
+              mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+
+          model: ID of the model to use. Only `whisper-1` (which is powered by our open source
+              Whisper V2 model) is currently available.
+
+          prompt: An optional text to guide the model's style or continue a previous audio
+              segment. The
+              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+              should be in English.
+
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`.
+
+          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+              output more random, while lower values like 0.2 will make it more focused and
+              deterministic. If set to 0, the model will use
+              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+              automatically increase the temperature until certain thresholds are hit.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "model": model,
+                "prompt": prompt,
+                "response_format": response_format,
+                "temperature": temperature,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            "/audio/translations",
+            body=await async_maybe_transform(body, translation_create_params.TranslationCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_get_response_format_type(response_format),
+        )
+
+
+class TranslationsWithRawResponse:
+    def __init__(self, translations: Translations) -> None:
+        self._translations = translations
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            translations.create,
+        )
+
+
+class AsyncTranslationsWithRawResponse:
+    def __init__(self, translations: AsyncTranslations) -> None:
+        self._translations = translations
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            translations.create,
+        )
+
+
+class TranslationsWithStreamingResponse:
+    def __init__(self, translations: Translations) -> None:
+        self._translations = translations
+
+        self.create = to_streamed_response_wrapper(
+            translations.create,
+        )
+
+
+class AsyncTranslationsWithStreamingResponse:
+    def __init__(self, translations: AsyncTranslations) -> None:
+        self._translations = translations
+
+        self.create = async_to_streamed_response_wrapper(
+            translations.create,
+        )
+
+
+def _get_response_format_type(
+    response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven,
+) -> type[Translation | TranslationVerbose | str]:
+    if isinstance(response_format, NotGiven) or response_format is None:  # pyright: ignore[reportUnnecessaryComparison]
+        return Translation
+
+    if response_format == "json":
+        return Translation
+    elif response_format == "verbose_json":
+        return TranslationVerbose
+    elif response_format == "srt" or response_format == "text" or response_format == "vtt":
+        return str
+    elif TYPE_CHECKING:  # type: ignore[unreachable]
+        assert_never(response_format)
+    else:
+        log.warning("Unexpected audio response format: %s", response_format)
+        return Translation
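A matching usage sketch for the Translations resource above (illustrative; the file name is a placeholder). Translations always target English, and only whisper-1 is currently supported:

    from openai import OpenAI

    client = OpenAI()

    with open("speech_de.mp3", "rb") as f:
        # Default `json` response_format returns a Translation; "verbose_json" would
        # return a TranslationVerbose, and "text"/"srt"/"vtt" return a plain str.
        translation = client.audio.translations.create(model="whisper-1", file=f)
    print(translation.text)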
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/batches.py b/.venv/lib/python3.12/site-packages/openai/resources/batches.py
new file mode 100644
index 00000000..b7a299be
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/batches.py
@@ -0,0 +1,517 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .. import _legacy_response
+from ..types import batch_list_params, batch_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..pagination import SyncCursorPage, AsyncCursorPage
+from ..types.batch import Batch
+from .._base_client import AsyncPaginator, make_request_options
+from ..types.shared_params.metadata import Metadata
+
+__all__ = ["Batches", "AsyncBatches"]
+
+
+class Batches(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> BatchesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return BatchesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> BatchesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return BatchesWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        completion_window: Literal["24h"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        input_file_id: str,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Batch:
+        """
+        Creates and executes a batch from an uploaded file of requests
+
+        Args:
+          completion_window: The time frame within which the batch should be processed. Currently only `24h`
+              is supported.
+
+          endpoint: The endpoint to be used for all requests in the batch. Currently
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
+
+          input_file_id: The ID of an uploaded file that contains requests for the new batch.
+
+              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+              for how to upload a file.
+
+              Your input file must be formatted as a
+              [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
+              and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+              requests, and can be up to 200 MB in size.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/batches",
+            body=maybe_transform(
+                {
+                    "completion_window": completion_window,
+                    "endpoint": endpoint,
+                    "input_file_id": input_file_id,
+                    "metadata": metadata,
+                },
+                batch_create_params.BatchCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Batch,
+        )
+
+    def retrieve(
+        self,
+        batch_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Batch:
+        """
+        Retrieves a batch.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        return self._get(
+            f"/batches/{batch_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Batch,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[Batch]:
+        """List your organization's batches.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your
+              place in the list. For instance, if you make a list request and receive 100
+              objects, ending with obj_foo, your subsequent call can include after=obj_foo
+              in order to fetch the next page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/batches",
+            page=SyncCursorPage[Batch],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    batch_list_params.BatchListParams,
+                ),
+            ),
+            model=Batch,
+        )
+
+    def cancel(
+        self,
+        batch_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Batch:
+        """Cancels an in-progress batch.
+
+        The batch will be in status `cancelling` for up to
+        10 minutes, before changing to `cancelled`, where it will have partial results
+        (if any) available in the output file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        return self._post(
+            f"/batches/{batch_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Batch,
+        )
+
+
+class AsyncBatches(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncBatchesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncBatchesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncBatchesWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        completion_window: Literal["24h"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        input_file_id: str,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Batch:
+        """
+        Creates and executes a batch from an uploaded file of requests
+
+        Args:
+          completion_window: The time frame within which the batch should be processed. Currently only `24h`
+              is supported.
+
+          endpoint: The endpoint to be used for all requests in the batch. Currently
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
+
+          input_file_id: The ID of an uploaded file that contains requests for the new batch.
+
+              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+              for how to upload a file.
+
+              Your input file must be formatted as a
+              [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
+              and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+              requests, and can be up to 200 MB in size.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/batches",
+            body=await async_maybe_transform(
+                {
+                    "completion_window": completion_window,
+                    "endpoint": endpoint,
+                    "input_file_id": input_file_id,
+                    "metadata": metadata,
+                },
+                batch_create_params.BatchCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Batch,
+        )
+
+    async def retrieve(
+        self,
+        batch_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Batch:
+        """
+        Retrieves a batch.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        return await self._get(
+            f"/batches/{batch_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Batch,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]:
+        """List your organization's batches.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your
+              place in the list. For instance, if you make a list request and receive 100
+              objects, ending with obj_foo, your subsequent call can include after=obj_foo
+              in order to fetch the next page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/batches",
+            page=AsyncCursorPage[Batch],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    batch_list_params.BatchListParams,
+                ),
+            ),
+            model=Batch,
+        )
+
+    async def cancel(
+        self,
+        batch_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Batch:
+        """Cancels an in-progress batch.
+
+        The batch will be in status `cancelling` for up to
+        10 minutes, before changing to `cancelled`, where it will have partial results
+        (if any) available in the output file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        return await self._post(
+            f"/batches/{batch_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Batch,
+        )
+
+
+class BatchesWithRawResponse:
+    def __init__(self, batches: Batches) -> None:
+        self._batches = batches
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            batches.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            batches.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            batches.list,
+        )
+        self.cancel = _legacy_response.to_raw_response_wrapper(
+            batches.cancel,
+        )
+
+
+class AsyncBatchesWithRawResponse:
+    def __init__(self, batches: AsyncBatches) -> None:
+        self._batches = batches
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            batches.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            batches.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            batches.list,
+        )
+        self.cancel = _legacy_response.async_to_raw_response_wrapper(
+            batches.cancel,
+        )
+
+
+class BatchesWithStreamingResponse:
+    def __init__(self, batches: Batches) -> None:
+        self._batches = batches
+
+        self.create = to_streamed_response_wrapper(
+            batches.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            batches.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            batches.list,
+        )
+        self.cancel = to_streamed_response_wrapper(
+            batches.cancel,
+        )
+
+
+class AsyncBatchesWithStreamingResponse:
+    def __init__(self, batches: AsyncBatches) -> None:
+        self._batches = batches
+
+        self.create = async_to_streamed_response_wrapper(
+            batches.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            batches.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            batches.list,
+        )
+        self.cancel = async_to_streamed_response_wrapper(
+            batches.cancel,
+        )
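The wrapper classes above only re-expose the four batch methods; a minimal usage sketch for the async resource follows (illustrative only, not part of the vendored file; "batch_abc123" is a placeholder ID and `AsyncOpenAI` reads `OPENAI_API_KEY` from the environment):

    import asyncio

    from openai import AsyncOpenAI

    async def main() -> None:
        client = AsyncOpenAI()
        # AsyncCursorPage supports `async for`, transparently fetching further pages.
        async for batch in client.batches.list(limit=20):
            print(batch.id, batch.status)
        # cancel() moves an in-progress batch to `cancelling`, then `cancelled`.
        cancelled = await client.batches.cancel("batch_abc123")
        print(cancelled.status)

    asyncio.run(main())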
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/__init__.py
new file mode 100644
index 00000000..87fea252
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .beta import (
+    Beta,
+    AsyncBeta,
+    BetaWithRawResponse,
+    AsyncBetaWithRawResponse,
+    BetaWithStreamingResponse,
+    AsyncBetaWithStreamingResponse,
+)
+from .threads import (
+    Threads,
+    AsyncThreads,
+    ThreadsWithRawResponse,
+    AsyncThreadsWithRawResponse,
+    ThreadsWithStreamingResponse,
+    AsyncThreadsWithStreamingResponse,
+)
+from .assistants import (
+    Assistants,
+    AsyncAssistants,
+    AssistantsWithRawResponse,
+    AsyncAssistantsWithRawResponse,
+    AssistantsWithStreamingResponse,
+    AsyncAssistantsWithStreamingResponse,
+)
+
+__all__ = [
+    "Assistants",
+    "AsyncAssistants",
+    "AssistantsWithRawResponse",
+    "AsyncAssistantsWithRawResponse",
+    "AssistantsWithStreamingResponse",
+    "AsyncAssistantsWithStreamingResponse",
+    "Threads",
+    "AsyncThreads",
+    "ThreadsWithRawResponse",
+    "AsyncThreadsWithRawResponse",
+    "ThreadsWithStreamingResponse",
+    "AsyncThreadsWithStreamingResponse",
+    "Beta",
+    "AsyncBeta",
+    "BetaWithRawResponse",
+    "AsyncBetaWithRawResponse",
+    "BetaWithStreamingResponse",
+    "AsyncBetaWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/assistants.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/assistants.py
new file mode 100644
index 00000000..1c7cbf37
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/assistants.py
@@ -0,0 +1,1004 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ...types.beta import (
+    assistant_list_params,
+    assistant_create_params,
+    assistant_update_params,
+)
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.beta.assistant import Assistant
+from ...types.shared.chat_model import ChatModel
+from ...types.beta.assistant_deleted import AssistantDeleted
+from ...types.shared_params.metadata import Metadata
+from ...types.shared.reasoning_effort import ReasoningEffort
+from ...types.beta.assistant_tool_param import AssistantToolParam
+from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = ["Assistants", "AsyncAssistants"]
+
+
+class Assistants(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AssistantsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AssistantsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AssistantsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AssistantsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        model: Union[str, ChatModel],
+        description: Optional[str] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        name: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Assistant:
+        """
+        Create an assistant with a model and instructions.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          description: The description of the assistant. The maximum length is 512 characters.
+
+          instructions: The system instructions that the assistant uses. The maximum length is 256,000
+              characters.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          name: The name of the assistant. The maximum length is 256 characters.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+              assistant. Tools can be of types `code_interpreter`, `file_search`, or
+              `function`.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            "/assistants",
+            body=maybe_transform(
+                {
+                    "model": model,
+                    "description": description,
+                    "instructions": instructions,
+                    "metadata": metadata,
+                    "name": name,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                },
+                assistant_create_params.AssistantCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Assistant,
+        )
+
+    def retrieve(
+        self,
+        assistant_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Assistant:
+        """
+        Retrieves an assistant.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not assistant_id:
+            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/assistants/{assistant_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Assistant,
+        )
+
+    def update(
+        self,
+        assistant_id: str,
+        *,
+        description: Optional[str] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[
+            str,
+            Literal[
+                "o3-mini",
+                "o3-mini-2025-01-31",
+                "o1",
+                "o1-2024-12-17",
+                "gpt-4o",
+                "gpt-4o-2024-11-20",
+                "gpt-4o-2024-08-06",
+                "gpt-4o-2024-05-13",
+                "gpt-4o-mini",
+                "gpt-4o-mini-2024-07-18",
+                "gpt-4.5-preview",
+                "gpt-4.5-preview-2025-02-27",
+                "gpt-4-turbo",
+                "gpt-4-turbo-2024-04-09",
+                "gpt-4-0125-preview",
+                "gpt-4-turbo-preview",
+                "gpt-4-1106-preview",
+                "gpt-4-vision-preview",
+                "gpt-4",
+                "gpt-4-0314",
+                "gpt-4-0613",
+                "gpt-4-32k",
+                "gpt-4-32k-0314",
+                "gpt-4-32k-0613",
+                "gpt-3.5-turbo",
+                "gpt-3.5-turbo-16k",
+                "gpt-3.5-turbo-0613",
+                "gpt-3.5-turbo-1106",
+                "gpt-3.5-turbo-0125",
+                "gpt-3.5-turbo-16k-0613",
+            ],
+        ]
+        | NotGiven = NOT_GIVEN,
+        name: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Assistant:
+        """Modifies an assistant.
+
+        Args:
+          description: The description of the assistant. The maximum length is 512
+              characters.
+
+          instructions: The system instructions that the assistant uses. The maximum length is 256,000
+              characters.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          name: The name of the assistant. The maximum length is 256 characters.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+              assistant. Tools can be of types `code_interpreter`, `file_search`, or
+              `function`.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not assistant_id:
+            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/assistants/{assistant_id}",
+            body=maybe_transform(
+                {
+                    "description": description,
+                    "instructions": instructions,
+                    "metadata": metadata,
+                    "model": model,
+                    "name": name,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                },
+                assistant_update_params.AssistantUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Assistant,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[Assistant]:
+        """Returns a list of assistants.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your
+              place in the list. For instance, if you make a list request and receive 100
+              objects, ending with obj_foo, your subsequent call can include after=obj_foo
+              in order to fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            "/assistants",
+            page=SyncCursorPage[Assistant],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    assistant_list_params.AssistantListParams,
+                ),
+            ),
+            model=Assistant,
+        )
+
+    def delete(
+        self,
+        assistant_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantDeleted:
+        """
+        Delete an assistant.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not assistant_id:
+            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._delete(
+            f"/assistants/{assistant_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=AssistantDeleted,
+        )
+
+
+class AsyncAssistants(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncAssistantsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncAssistantsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        model: Union[str, ChatModel],
+        description: Optional[str] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        name: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Assistant:
+        """
+        Create an assistant with a model and instructions.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          description: The description of the assistant. The maximum length is 512 characters.
+
+          instructions: The system instructions that the assistant uses. The maximum length is 256,000
+              characters.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          name: The name of the assistant. The maximum length is 256 characters.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+              assistant. Tools can be of types `code_interpreter`, `file_search`, or
+              `function`.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            "/assistants",
+            body=await async_maybe_transform(
+                {
+                    "model": model,
+                    "description": description,
+                    "instructions": instructions,
+                    "metadata": metadata,
+                    "name": name,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                },
+                assistant_create_params.AssistantCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Assistant,
+        )
+
+    async def retrieve(
+        self,
+        assistant_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Assistant:
+        """
+        Retrieves an assistant.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not assistant_id:
+            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/assistants/{assistant_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Assistant,
+        )
+
+    async def update(
+        self,
+        assistant_id: str,
+        *,
+        description: Optional[str] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[
+            str,
+            Literal[
+                "o3-mini",
+                "o3-mini-2025-01-31",
+                "o1",
+                "o1-2024-12-17",
+                "gpt-4o",
+                "gpt-4o-2024-11-20",
+                "gpt-4o-2024-08-06",
+                "gpt-4o-2024-05-13",
+                "gpt-4o-mini",
+                "gpt-4o-mini-2024-07-18",
+                "gpt-4.5-preview",
+                "gpt-4.5-preview-2025-02-27",
+                "gpt-4-turbo",
+                "gpt-4-turbo-2024-04-09",
+                "gpt-4-0125-preview",
+                "gpt-4-turbo-preview",
+                "gpt-4-1106-preview",
+                "gpt-4-vision-preview",
+                "gpt-4",
+                "gpt-4-0314",
+                "gpt-4-0613",
+                "gpt-4-32k",
+                "gpt-4-32k-0314",
+                "gpt-4-32k-0613",
+                "gpt-3.5-turbo",
+                "gpt-3.5-turbo-16k",
+                "gpt-3.5-turbo-0613",
+                "gpt-3.5-turbo-1106",
+                "gpt-3.5-turbo-0125",
+                "gpt-3.5-turbo-16k-0613",
+            ],
+        ]
+        | NotGiven = NOT_GIVEN,
+        name: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Assistant:
+        """Modifies an assistant.
+
+        Args:
+          description: The description of the assistant. The maximum length is 512
+              characters.
+
+          instructions: The system instructions that the assistant uses. The maximum length is 256,000
+              characters.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          name: The name of the assistant. The maximum length is 256 characters.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+              assistant. Tools can be of types `code_interpreter`, `file_search`, or
+              `function`.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not assistant_id:
+            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/assistants/{assistant_id}",
+            body=await async_maybe_transform(
+                {
+                    "description": description,
+                    "instructions": instructions,
+                    "metadata": metadata,
+                    "model": model,
+                    "name": name,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                },
+                assistant_update_params.AssistantUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Assistant,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[Assistant, AsyncCursorPage[Assistant]]:
+        """Returns a list of assistants.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your
+              place in the list. For instance, if you make a list request and receive 100
+              objects, ending with obj_foo, your subsequent call can include after=obj_foo
+              in order to fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            "/assistants",
+            page=AsyncCursorPage[Assistant],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    assistant_list_params.AssistantListParams,
+                ),
+            ),
+            model=Assistant,
+        )
+
+    async def delete(
+        self,
+        assistant_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantDeleted:
+        """
+        Delete an assistant.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not assistant_id:
+            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._delete(
+            f"/assistants/{assistant_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=AssistantDeleted,
+        )
+
+
+class AssistantsWithRawResponse:
+    def __init__(self, assistants: Assistants) -> None:
+        self._assistants = assistants
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            assistants.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            assistants.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            assistants.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            assistants.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            assistants.delete,
+        )
+
+
+class AsyncAssistantsWithRawResponse:
+    def __init__(self, assistants: AsyncAssistants) -> None:
+        self._assistants = assistants
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            assistants.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            assistants.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            assistants.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            assistants.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            assistants.delete,
+        )
+
+
+class AssistantsWithStreamingResponse:
+    def __init__(self, assistants: Assistants) -> None:
+        self._assistants = assistants
+
+        self.create = to_streamed_response_wrapper(
+            assistants.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            assistants.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            assistants.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            assistants.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            assistants.delete,
+        )
+
+
+class AsyncAssistantsWithStreamingResponse:
+    def __init__(self, assistants: AsyncAssistants) -> None:
+        self._assistants = assistants
+
+        self.create = async_to_streamed_response_wrapper(
+            assistants.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            assistants.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            assistants.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            assistants.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            assistants.delete,
+        )
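A short sketch of how the synchronous `Assistants` resource defined above is reached from the top-level client (illustrative only; the model, name, and instructions are placeholder values, and the `OpenAI-Beta: assistants=v2` header is added by the methods themselves):

    from openai import OpenAI

    client = OpenAI()
    assistant = client.beta.assistants.create(
        model="gpt-4o",
        name="Docs helper",
        instructions="Answer questions about the uploaded documentation.",
    )
    # update() and delete() take the assistant ID as the first positional argument.
    assistant = client.beta.assistants.update(assistant.id, name="Docs helper v2")
    # SyncCursorPage is iterable and pages through results automatically.
    for a in client.beta.assistants.list(limit=10):
        print(a.id, a.name)
    client.beta.assistants.delete(assistant.id)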
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/beta.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/beta.py
new file mode 100644
index 00000000..62fc8258
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/beta.py
@@ -0,0 +1,175 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from .chat.chat import Chat, AsyncChat
+from .assistants import (
+    Assistants,
+    AsyncAssistants,
+    AssistantsWithRawResponse,
+    AsyncAssistantsWithRawResponse,
+    AssistantsWithStreamingResponse,
+    AsyncAssistantsWithStreamingResponse,
+)
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .threads.threads import (
+    Threads,
+    AsyncThreads,
+    ThreadsWithRawResponse,
+    AsyncThreadsWithRawResponse,
+    ThreadsWithStreamingResponse,
+    AsyncThreadsWithStreamingResponse,
+)
+from .realtime.realtime import (
+    Realtime,
+    AsyncRealtime,
+    RealtimeWithRawResponse,
+    AsyncRealtimeWithRawResponse,
+    RealtimeWithStreamingResponse,
+    AsyncRealtimeWithStreamingResponse,
+)
+
+__all__ = ["Beta", "AsyncBeta"]
+
+
+class Beta(SyncAPIResource):
+    @cached_property
+    def chat(self) -> Chat:
+        return Chat(self._client)
+
+    @cached_property
+    def realtime(self) -> Realtime:
+        return Realtime(self._client)
+
+    @cached_property
+    def assistants(self) -> Assistants:
+        return Assistants(self._client)
+
+    @cached_property
+    def threads(self) -> Threads:
+        return Threads(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> BetaWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return BetaWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> BetaWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return BetaWithStreamingResponse(self)
+
+
+class AsyncBeta(AsyncAPIResource):
+    @cached_property
+    def chat(self) -> AsyncChat:
+        return AsyncChat(self._client)
+
+    @cached_property
+    def realtime(self) -> AsyncRealtime:
+        return AsyncRealtime(self._client)
+
+    @cached_property
+    def assistants(self) -> AsyncAssistants:
+        return AsyncAssistants(self._client)
+
+    @cached_property
+    def threads(self) -> AsyncThreads:
+        return AsyncThreads(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncBetaWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncBetaWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncBetaWithStreamingResponse(self)
+
+
+class BetaWithRawResponse:
+    def __init__(self, beta: Beta) -> None:
+        self._beta = beta
+
+    @cached_property
+    def realtime(self) -> RealtimeWithRawResponse:
+        return RealtimeWithRawResponse(self._beta.realtime)
+
+    @cached_property
+    def assistants(self) -> AssistantsWithRawResponse:
+        return AssistantsWithRawResponse(self._beta.assistants)
+
+    @cached_property
+    def threads(self) -> ThreadsWithRawResponse:
+        return ThreadsWithRawResponse(self._beta.threads)
+
+
+class AsyncBetaWithRawResponse:
+    def __init__(self, beta: AsyncBeta) -> None:
+        self._beta = beta
+
+    @cached_property
+    def realtime(self) -> AsyncRealtimeWithRawResponse:
+        return AsyncRealtimeWithRawResponse(self._beta.realtime)
+
+    @cached_property
+    def assistants(self) -> AsyncAssistantsWithRawResponse:
+        return AsyncAssistantsWithRawResponse(self._beta.assistants)
+
+    @cached_property
+    def threads(self) -> AsyncThreadsWithRawResponse:
+        return AsyncThreadsWithRawResponse(self._beta.threads)
+
+
+class BetaWithStreamingResponse:
+    def __init__(self, beta: Beta) -> None:
+        self._beta = beta
+
+    @cached_property
+    def realtime(self) -> RealtimeWithStreamingResponse:
+        return RealtimeWithStreamingResponse(self._beta.realtime)
+
+    @cached_property
+    def assistants(self) -> AssistantsWithStreamingResponse:
+        return AssistantsWithStreamingResponse(self._beta.assistants)
+
+    @cached_property
+    def threads(self) -> ThreadsWithStreamingResponse:
+        return ThreadsWithStreamingResponse(self._beta.threads)
+
+
+class AsyncBetaWithStreamingResponse:
+    def __init__(self, beta: AsyncBeta) -> None:
+        self._beta = beta
+
+    @cached_property
+    def realtime(self) -> AsyncRealtimeWithStreamingResponse:
+        return AsyncRealtimeWithStreamingResponse(self._beta.realtime)
+
+    @cached_property
+    def assistants(self) -> AsyncAssistantsWithStreamingResponse:
+        return AsyncAssistantsWithStreamingResponse(self._beta.assistants)
+
+    @cached_property
+    def threads(self) -> AsyncThreadsWithStreamingResponse:
+        return AsyncThreadsWithStreamingResponse(self._beta.threads)
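The raw and streaming wrapper classes above simply re-wrap each sub-resource, so `.with_raw_response` can be chained off `client.beta`. A brief sketch (illustrative; assumes the request-id header is present on the response):

    from openai import OpenAI

    client = OpenAI()
    # Returns the HTTP response wrapper; .parse() then yields the typed page.
    raw = client.beta.with_raw_response.assistants.list(limit=5)
    print(raw.headers.get("x-request-id"))
    page = raw.parse()
    for assistant in page.data:
        print(assistant.id)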
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/__init__.py
new file mode 100644
index 00000000..072d7867
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/__init__.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chat import Chat, AsyncChat
+from .completions import Completions, AsyncCompletions
+
+__all__ = [
+    "Completions",
+    "AsyncCompletions",
+    "Chat",
+    "AsyncChat",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/chat.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/chat.py
new file mode 100644
index 00000000..6afdcea3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/chat.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ...._compat import cached_property
+from .completions import Completions, AsyncCompletions
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["Chat", "AsyncChat"]
+
+
+class Chat(SyncAPIResource):
+    @cached_property
+    def completions(self) -> Completions:
+        return Completions(self._client)
+
+
+class AsyncChat(AsyncAPIResource):
+    @cached_property
+    def completions(self) -> AsyncCompletions:
+        return AsyncCompletions(self._client)
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/completions.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/completions.py
new file mode 100644
index 00000000..545a3f40
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/chat/completions.py
@@ -0,0 +1,634 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Type, Union, Iterable, Optional, cast
+from functools import partial
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._streaming import Stream
+from ....types.chat import completion_create_params
+from ...._base_client import make_request_options
+from ....lib._parsing import (
+    ResponseFormatT,
+    validate_input_tools as _validate_input_tools,
+    parse_chat_completion as _parse_chat_completion,
+    type_to_response_format_param as _type_to_response_format,
+)
+from ....types.chat_model import ChatModel
+from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
+from ....types.shared_params import Metadata, ReasoningEffort
+from ....types.chat.chat_completion import ChatCompletion
+from ....types.chat.chat_completion_chunk import ChatCompletionChunk
+from ....types.chat.parsed_chat_completion import ParsedChatCompletion
+from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
+from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
+from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+
+__all__ = ["Completions", "AsyncCompletions"]
+
+
+class Completions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> CompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return CompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return CompletionsWithStreamingResponse(self)
+
+    def parse(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedChatCompletion[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
+        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
+
+        You can pass a pydantic model to this method and it will automatically convert the model
+        into a JSON schema, send it to the API and parse the response content back into the given model.
+
+        This method will also automatically parse `function` tool calls if:
+        - You use the `openai.pydantic_function_tool()` helper method
+        - You mark your tool schema with `"strict": True`
+
+        Example usage:
+        ```py
+        from typing import List
+
+        from pydantic import BaseModel
+        from openai import OpenAI
+
+
+        class Step(BaseModel):
+            explanation: str
+            output: str
+
+
+        class MathResponse(BaseModel):
+            steps: List[Step]
+            final_answer: str
+
+
+        client = OpenAI()
+        completion = client.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {"role": "system", "content": "You are a helpful math tutor."},
+                {"role": "user", "content": "solve 8x + 31 = 2"},
+            ],
+            response_format=MathResponse,
+        )
+
+        message = completion.choices[0].message
+        if message.parsed:
+            print(message.parsed.steps)
+            print("answer: ", message.parsed.final_answer)
+        ```
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
+            **(extra_headers or {}),
+        }
+
+        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
+            return _parse_chat_completion(
+                response_format=response_format,
+                chat_completion=raw_completion,
+                input_tools=tools,
+            )
+
+        return self._post(
+            "/chat/completions",
+            body=maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "audio": audio,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "modalities": modalities,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "prediction": prediction,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": _type_to_response_format(response_format),
+                    "seed": seed,
+                    "service_tier": service_tier,
+                    "stop": stop,
+                    "store": store,
+                    "stream": False,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                    "web_search_options": web_search_options,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
+            stream=False,
+        )
+
+    def stream(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletionStreamManager[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
+        and automatic accumulation of each delta.
+
+        This also supports all of the parsing utilities that `.parse()` does.
+
+        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+        ```py
+        with client.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[...],
+        ) as stream:
+            for event in stream:
+                if event.type == "content.delta":
+                    print(event.delta, flush=True, end="")
+        ```
+
+        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an iterator. The full list of events that are yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
+
+        When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+        the context manager.
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
+            **(extra_headers or {}),
+        }
+
+        api_request: partial[Stream[ChatCompletionChunk]] = partial(
+            self._client.chat.completions.create,
+            messages=messages,
+            model=model,
+            audio=audio,
+            stream=True,
+            response_format=_type_to_response_format(response_format),
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_completion_tokens=max_completion_tokens,
+            max_tokens=max_tokens,
+            metadata=metadata,
+            modalities=modalities,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            prediction=prediction,
+            presence_penalty=presence_penalty,
+            reasoning_effort=reasoning_effort,
+            seed=seed,
+            service_tier=service_tier,
+            store=store,
+            stop=stop,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+            web_search_options=web_search_options,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return ChatCompletionStreamManager(
+            api_request,
+            response_format=response_format,
+            input_tools=tools,
+        )
+
+
+class AsyncCompletions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncCompletionsWithStreamingResponse(self)
+
+    async def parse(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedChatCompletion[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
+        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
+
+        You can pass a pydantic model to this method and it will automatically convert the model
+        into a JSON schema, send it to the API and parse the response content back into the given model.
+
+        This method will also automatically parse `function` tool calls if:
+        - You use the `openai.pydantic_function_tool()` helper method
+        - You mark your tool schema with `"strict": True`
+
+        Example usage:
+        ```py
+        from typing import List
+
+        from pydantic import BaseModel
+        from openai import AsyncOpenAI
+
+
+        class Step(BaseModel):
+            explanation: str
+            output: str
+
+
+        class MathResponse(BaseModel):
+            steps: List[Step]
+            final_answer: str
+
+
+        client = AsyncOpenAI()
+        completion = await client.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {"role": "system", "content": "You are a helpful math tutor."},
+                {"role": "user", "content": "solve 8x + 31 = 2"},
+            ],
+            response_format=MathResponse,
+        )
+
+        message = completion.choices[0].message
+        if message.parsed:
+            print(message.parsed.steps)
+            print("answer: ", message.parsed.final_answer)
+        ```
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
+            **(extra_headers or {}),
+        }
+
+        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
+            return _parse_chat_completion(
+                response_format=response_format,
+                chat_completion=raw_completion,
+                input_tools=tools,
+            )
+
+        return await self._post(
+            "/chat/completions",
+            body=await async_maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "audio": audio,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "modalities": modalities,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "prediction": prediction,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": _type_to_response_format(response_format),
+                    "seed": seed,
+                    "service_tier": service_tier,
+                    "store": store,
+                    "stop": stop,
+                    "stream": False,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                    "web_search_options": web_search_options,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
+            stream=False,
+        )
+
+    def stream(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
+        and automatic accumulation of each delta.
+
+        This also supports all of the parsing utilities that `.parse()` does.
+
+        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+        ```py
+        async with client.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[...],
+        ) as stream:
+            async for event in stream:
+                if event.type == "content.delta":
+                    print(event.delta, flush=True, end="")
+        ```
+
+        When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an async iterator. The full list of events that are yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
+
+        When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+        the context manager.
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
+            **(extra_headers or {}),
+        }
+
+        api_request = self._client.chat.completions.create(
+            messages=messages,
+            model=model,
+            audio=audio,
+            stream=True,
+            response_format=_type_to_response_format(response_format),
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_completion_tokens=max_completion_tokens,
+            max_tokens=max_tokens,
+            metadata=metadata,
+            modalities=modalities,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            prediction=prediction,
+            presence_penalty=presence_penalty,
+            reasoning_effort=reasoning_effort,
+            seed=seed,
+            service_tier=service_tier,
+            stop=stop,
+            store=store,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+            web_search_options=web_search_options,
+        )
+        return AsyncChatCompletionStreamManager(
+            api_request,
+            response_format=response_format,
+            input_tools=tools,
+        )
+
+
+class CompletionsWithRawResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.parse = _legacy_response.to_raw_response_wrapper(
+            completions.parse,
+        )
+
+
+class AsyncCompletionsWithRawResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.parse = _legacy_response.async_to_raw_response_wrapper(
+            completions.parse,
+        )
+
+
+class CompletionsWithStreamingResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.parse = to_streamed_response_wrapper(
+            completions.parse,
+        )
+
+
+class AsyncCompletionsWithStreamingResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.parse = async_to_streamed_response_wrapper(
+            completions.parse,
+        )
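
Editorial note: because `CompletionsWithRawResponse` wraps `parse` above, the helper can also be called through the `.with_raw_response` prefix to inspect HTTP metadata before parsing. A hedged usage sketch (the model name and header key are assumptions; the exact raw-response type depends on the installed SDK version):

```py
from typing import List

from pydantic import BaseModel
from openai import OpenAI


class Step(BaseModel):
    explanation: str
    output: str


class MathResponse(BaseModel):
    steps: List[Step]
    final_answer: str


client = OpenAI()

# Same call as in the parse() docstring above, but routed through the
# raw-response wrapper so HTTP metadata is visible before parsing.
raw = client.beta.chat.completions.with_raw_response.parse(
    model="gpt-4o-2024-08-06",
    messages=[
        {"role": "system", "content": "You are a helpful math tutor."},
        {"role": "user", "content": "solve 8x + 31 = 2"},
    ],
    response_format=MathResponse,
)

print(raw.headers.get("x-request-id"))  # header name is an assumption
completion = raw.parse()  # ParsedChatCompletion[MathResponse]
message = completion.choices[0].message
if message.parsed:
    print(message.parsed.final_answer)
```

Note that only `parse` is exposed through these wrapper classes; `stream()` is not, since it already returns a context-managed stream manager.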
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/__init__.py
new file mode 100644
index 00000000..7ab3d993
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .realtime import (
+    Realtime,
+    AsyncRealtime,
+    RealtimeWithRawResponse,
+    AsyncRealtimeWithRawResponse,
+    RealtimeWithStreamingResponse,
+    AsyncRealtimeWithStreamingResponse,
+)
+from .sessions import (
+    Sessions,
+    AsyncSessions,
+    SessionsWithRawResponse,
+    AsyncSessionsWithRawResponse,
+    SessionsWithStreamingResponse,
+    AsyncSessionsWithStreamingResponse,
+)
+from .transcription_sessions import (
+    TranscriptionSessions,
+    AsyncTranscriptionSessions,
+    TranscriptionSessionsWithRawResponse,
+    AsyncTranscriptionSessionsWithRawResponse,
+    TranscriptionSessionsWithStreamingResponse,
+    AsyncTranscriptionSessionsWithStreamingResponse,
+)
+
+__all__ = [
+    "Sessions",
+    "AsyncSessions",
+    "SessionsWithRawResponse",
+    "AsyncSessionsWithRawResponse",
+    "SessionsWithStreamingResponse",
+    "AsyncSessionsWithStreamingResponse",
+    "TranscriptionSessions",
+    "AsyncTranscriptionSessions",
+    "TranscriptionSessionsWithRawResponse",
+    "AsyncTranscriptionSessionsWithRawResponse",
+    "TranscriptionSessionsWithStreamingResponse",
+    "AsyncTranscriptionSessionsWithStreamingResponse",
+    "Realtime",
+    "AsyncRealtime",
+    "RealtimeWithRawResponse",
+    "AsyncRealtimeWithRawResponse",
+    "RealtimeWithStreamingResponse",
+    "AsyncRealtimeWithStreamingResponse",
+]
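
Editorial note: the `realtime.py` module that follows exposes `Realtime.connect()`, a context manager over a websocket connection, plus per-event resources such as `session` and `response`. A minimal hedged usage sketch (the model name and event types are assumptions based on the public Realtime API docs; requires the optional `websockets` dependency, i.e. `openai[realtime]`):

```py
from openai import OpenAI

client = OpenAI()

# connect() returns a RealtimeConnectionManager; entering it opens the websocket.
with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    # Configure the session, then ask the server to start a response.
    connection.session.update(session={"modalities": ["text"], "instructions": "Reply briefly."})
    connection.response.create()

    # The connection itself iterates RealtimeServerEvent objects until closed.
    for event in connection:
        if event.type == "response.text.delta":
            print(event.delta, end="", flush=True)
        elif event.type == "response.done":
            print()
            break
```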
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/realtime.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/realtime.py
new file mode 100644
index 00000000..76e57f8c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/realtime.py
@@ -0,0 +1,1066 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import json
+import logging
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Iterator, cast
+from typing_extensions import AsyncIterator
+
+import httpx
+from pydantic import BaseModel
+
+from .sessions import (
+    Sessions,
+    AsyncSessions,
+    SessionsWithRawResponse,
+    AsyncSessionsWithRawResponse,
+    SessionsWithStreamingResponse,
+    AsyncSessionsWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Query, Headers, NotGiven
+from ...._utils import (
+    is_azure_client,
+    maybe_transform,
+    strip_not_given,
+    async_maybe_transform,
+    is_async_azure_client,
+)
+from ...._compat import cached_property
+from ...._models import construct_type_unchecked
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._exceptions import OpenAIError
+from ...._base_client import _merge_mappings
+from ....types.beta.realtime import (
+    session_update_event_param,
+    response_create_event_param,
+    transcription_session_update_param,
+)
+from .transcription_sessions import (
+    TranscriptionSessions,
+    AsyncTranscriptionSessions,
+    TranscriptionSessionsWithRawResponse,
+    AsyncTranscriptionSessionsWithRawResponse,
+    TranscriptionSessionsWithStreamingResponse,
+    AsyncTranscriptionSessionsWithStreamingResponse,
+)
+from ....types.websocket_connection_options import WebsocketConnectionOptions
+from ....types.beta.realtime.realtime_client_event import RealtimeClientEvent
+from ....types.beta.realtime.realtime_server_event import RealtimeServerEvent
+from ....types.beta.realtime.conversation_item_param import ConversationItemParam
+from ....types.beta.realtime.realtime_client_event_param import RealtimeClientEventParam
+
+if TYPE_CHECKING:
+    from websockets.sync.client import ClientConnection as WebsocketConnection
+    from websockets.asyncio.client import ClientConnection as AsyncWebsocketConnection
+
+    from ...._client import OpenAI, AsyncOpenAI
+
+__all__ = ["Realtime", "AsyncRealtime"]
+
+log: logging.Logger = logging.getLogger(__name__)
+
+
+class Realtime(SyncAPIResource):
+    @cached_property
+    def sessions(self) -> Sessions:
+        return Sessions(self._client)
+
+    @cached_property
+    def transcription_sessions(self) -> TranscriptionSessions:
+        return TranscriptionSessions(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> RealtimeWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return RealtimeWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> RealtimeWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return RealtimeWithStreamingResponse(self)
+
+    def connect(
+        self,
+        *,
+        model: str,
+        extra_query: Query = {},
+        extra_headers: Headers = {},
+        websocket_connection_options: WebsocketConnectionOptions = {},
+    ) -> RealtimeConnectionManager:
+        """
+        The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
+
+        Some notable benefits of the API include:
+
+        - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
+        - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
+        - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
+
+        The Realtime API is a stateful, event-based API that communicates over a WebSocket.
+        """
+        return RealtimeConnectionManager(
+            client=self._client,
+            extra_query=extra_query,
+            extra_headers=extra_headers,
+            websocket_connection_options=websocket_connection_options,
+            model=model,
+        )
+
+
+class AsyncRealtime(AsyncAPIResource):
+    @cached_property
+    def sessions(self) -> AsyncSessions:
+        return AsyncSessions(self._client)
+
+    @cached_property
+    def transcription_sessions(self) -> AsyncTranscriptionSessions:
+        return AsyncTranscriptionSessions(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncRealtimeWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncRealtimeWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncRealtimeWithStreamingResponse(self)
+
+    def connect(
+        self,
+        *,
+        model: str,
+        extra_query: Query = {},
+        extra_headers: Headers = {},
+        websocket_connection_options: WebsocketConnectionOptions = {},
+    ) -> AsyncRealtimeConnectionManager:
+        """
+        The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
+
+        Some notable benefits of the API include:
+
+        - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
+        - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
+        - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
+
+        The Realtime API is a stateful, event-based API that communicates over a WebSocket.
+        """
+        return AsyncRealtimeConnectionManager(
+            client=self._client,
+            extra_query=extra_query,
+            extra_headers=extra_headers,
+            websocket_connection_options=websocket_connection_options,
+            model=model,
+        )
+
+
+class RealtimeWithRawResponse:
+    def __init__(self, realtime: Realtime) -> None:
+        self._realtime = realtime
+
+    @cached_property
+    def sessions(self) -> SessionsWithRawResponse:
+        return SessionsWithRawResponse(self._realtime.sessions)
+
+    @cached_property
+    def transcription_sessions(self) -> TranscriptionSessionsWithRawResponse:
+        return TranscriptionSessionsWithRawResponse(self._realtime.transcription_sessions)
+
+
+class AsyncRealtimeWithRawResponse:
+    def __init__(self, realtime: AsyncRealtime) -> None:
+        self._realtime = realtime
+
+    @cached_property
+    def sessions(self) -> AsyncSessionsWithRawResponse:
+        return AsyncSessionsWithRawResponse(self._realtime.sessions)
+
+    @cached_property
+    def transcription_sessions(self) -> AsyncTranscriptionSessionsWithRawResponse:
+        return AsyncTranscriptionSessionsWithRawResponse(self._realtime.transcription_sessions)
+
+
+class RealtimeWithStreamingResponse:
+    def __init__(self, realtime: Realtime) -> None:
+        self._realtime = realtime
+
+    @cached_property
+    def sessions(self) -> SessionsWithStreamingResponse:
+        return SessionsWithStreamingResponse(self._realtime.sessions)
+
+    @cached_property
+    def transcription_sessions(self) -> TranscriptionSessionsWithStreamingResponse:
+        return TranscriptionSessionsWithStreamingResponse(self._realtime.transcription_sessions)
+
+
+class AsyncRealtimeWithStreamingResponse:
+    def __init__(self, realtime: AsyncRealtime) -> None:
+        self._realtime = realtime
+
+    @cached_property
+    def sessions(self) -> AsyncSessionsWithStreamingResponse:
+        return AsyncSessionsWithStreamingResponse(self._realtime.sessions)
+
+    @cached_property
+    def transcription_sessions(self) -> AsyncTranscriptionSessionsWithStreamingResponse:
+        return AsyncTranscriptionSessionsWithStreamingResponse(self._realtime.transcription_sessions)
+
+
+class AsyncRealtimeConnection:
+    """Represents a live websocket connection to the Realtime API"""
+
+    session: AsyncRealtimeSessionResource
+    response: AsyncRealtimeResponseResource
+    input_audio_buffer: AsyncRealtimeInputAudioBufferResource
+    conversation: AsyncRealtimeConversationResource
+    transcription_session: AsyncRealtimeTranscriptionSessionResource
+
+    _connection: AsyncWebsocketConnection
+
+    def __init__(self, connection: AsyncWebsocketConnection) -> None:
+        self._connection = connection
+
+        self.session = AsyncRealtimeSessionResource(self)
+        self.response = AsyncRealtimeResponseResource(self)
+        self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self)
+        self.conversation = AsyncRealtimeConversationResource(self)
+        self.transcription_session = AsyncRealtimeTranscriptionSessionResource(self)
+
+    async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]:
+        """
+        An infinite iterator that will continue to yield events until
+        the connection is closed.
+        """
+        from websockets.exceptions import ConnectionClosedOK
+
+        try:
+            while True:
+                yield await self.recv()
+        except ConnectionClosedOK:
+            return
+
+    async def recv(self) -> RealtimeServerEvent:
+        """
+        Receive the next message from the connection and parse it into a `RealtimeServerEvent` object.
+
+        Canceling this method is safe. There's no risk of losing data.
+        """
+        return self.parse_event(await self.recv_bytes())
+
+    async def recv_bytes(self) -> bytes:
+        """Receive the next message from the connection as raw bytes.
+
+        Canceling this method is safe. There's no risk of losing data.
+
+        If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
+        then you can call `.parse_event(data)`.
+        """
+        message = await self._connection.recv(decode=False)
+        log.debug(f"Received websocket message: %s", message)
+        if not isinstance(message, bytes):
+            # passing `decode=False` should always result in us getting `bytes` back
+            raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}")
+
+        return message
+
+    async def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
+        data = (
+            event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
+            if isinstance(event, BaseModel)
+            else json.dumps(await async_maybe_transform(event, RealtimeClientEventParam))
+        )
+        await self._connection.send(data)
+
+    async def close(self, *, code: int = 1000, reason: str = "") -> None:
+        await self._connection.close(code=code, reason=reason)
+
+    def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
+        """
+        Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.
+
+        This is helpful if you're using `.recv_bytes()`.
+        """
+        return cast(
+            RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
+        )
+
+
+class AsyncRealtimeConnectionManager:
+    """
+    Context manager over an `AsyncRealtimeConnection` that is returned by `beta.realtime.connect()`
+
+    This context manager ensures that the connection will be closed when it exits.
+
+    ---
+
+    Note that if your application doesn't work well with the context manager approach, then you
+    can call the `.enter()` method directly to initiate a connection.
+
+    **Warning**: You must remember to close the connection with `.close()`.
+
+    ```py
+    connection = await client.beta.realtime.connect(...).enter()
+    # ...
+    await connection.close()
+    ```
+    """
+
+    def __init__(
+        self,
+        *,
+        client: AsyncOpenAI,
+        model: str,
+        extra_query: Query,
+        extra_headers: Headers,
+        websocket_connection_options: WebsocketConnectionOptions,
+    ) -> None:
+        self.__client = client
+        self.__model = model
+        self.__connection: AsyncRealtimeConnection | None = None
+        self.__extra_query = extra_query
+        self.__extra_headers = extra_headers
+        self.__websocket_connection_options = websocket_connection_options
+
+    async def __aenter__(self) -> AsyncRealtimeConnection:
+        """
+        👋 If your application doesn't work well with the context manager approach, then you
+        can call this method directly to initiate a connection.
+
+        **Warning**: You must remember to close the connection with `.close()`.
+
+        ```py
+        connection = await client.beta.realtime.connect(...).enter()
+        # ...
+        await connection.close()
+        ```
+        """
+        try:
+            from websockets.asyncio.client import connect
+        except ImportError as exc:
+            raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc
+
+        extra_query = self.__extra_query
+        auth_headers = self.__client.auth_headers
+        if is_async_azure_client(self.__client):
+            url, auth_headers = await self.__client._configure_realtime(self.__model, extra_query)
+        else:
+            url = self._prepare_url().copy_with(
+                params={
+                    **self.__client.base_url.params,
+                    "model": self.__model,
+                    **extra_query,
+                },
+            )
+        log.debug("Connecting to %s", url)
+        if self.__websocket_connection_options:
+            log.debug("Connection options: %s", self.__websocket_connection_options)
+
+        self.__connection = AsyncRealtimeConnection(
+            await connect(
+                str(url),
+                user_agent_header=self.__client.user_agent,
+                additional_headers=_merge_mappings(
+                    {
+                        **auth_headers,
+                        "OpenAI-Beta": "realtime=v1",
+                    },
+                    self.__extra_headers,
+                ),
+                **self.__websocket_connection_options,
+            )
+        )
+
+        return self.__connection
+
+    enter = __aenter__
+
+    def _prepare_url(self) -> httpx.URL:
+        if self.__client.websocket_base_url is not None:
+            base_url = httpx.URL(self.__client.websocket_base_url)
+        else:
+            base_url = self.__client._base_url.copy_with(scheme="wss")
+
+        merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
+        return base_url.copy_with(raw_path=merge_raw_path)
+
+    async def __aexit__(
+        self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
+    ) -> None:
+        if self.__connection is not None:
+            await self.__connection.close()
+
+
+class RealtimeConnection:
+    """Represents a live websocket connection to the Realtime API"""
+
+    session: RealtimeSessionResource
+    response: RealtimeResponseResource
+    input_audio_buffer: RealtimeInputAudioBufferResource
+    conversation: RealtimeConversationResource
+    transcription_session: RealtimeTranscriptionSessionResource
+
+    _connection: WebsocketConnection
+
+    def __init__(self, connection: WebsocketConnection) -> None:
+        self._connection = connection
+
+        self.session = RealtimeSessionResource(self)
+        self.response = RealtimeResponseResource(self)
+        self.input_audio_buffer = RealtimeInputAudioBufferResource(self)
+        self.conversation = RealtimeConversationResource(self)
+        self.transcription_session = RealtimeTranscriptionSessionResource(self)
+
+    def __iter__(self) -> Iterator[RealtimeServerEvent]:
+        """
+        An infinite iterator that will continue to yield events until
+        the connection is closed.
+        """
+        from websockets.exceptions import ConnectionClosedOK
+
+        try:
+            while True:
+                yield self.recv()
+        except ConnectionClosedOK:
+            return
+
+    def recv(self) -> RealtimeServerEvent:
+        """
+        Receive the next message from the connection and parse it into a `RealtimeServerEvent` object.
+
+        Canceling this method is safe. There's no risk of losing data.
+        """
+        return self.parse_event(self.recv_bytes())
+
+    def recv_bytes(self) -> bytes:
+        """Receive the next message from the connection as raw bytes.
+
+        Canceling this method is safe. There's no risk of losing data.
+
+        If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
+        then you can call `.parse_event(data)`.
+        """
+        message = self._connection.recv(decode=False)
+        log.debug(f"Received websocket message: %s", message)
+        if not isinstance(message, bytes):
+            # passing `decode=False` should always result in us getting `bytes` back
+            raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}")
+
+        return message
+
+    def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
+        data = (
+            event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
+            if isinstance(event, BaseModel)
+            else json.dumps(maybe_transform(event, RealtimeClientEventParam))
+        )
+        self._connection.send(data)
+
+    def close(self, *, code: int = 1000, reason: str = "") -> None:
+        self._connection.close(code=code, reason=reason)
+
+    def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
+        """
+        Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.
+
+        This is helpful if you're using `.recv_bytes()`.
+        """
+        return cast(
+            RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
+        )
+
+
+class RealtimeConnectionManager:
+    """
+    Context manager over a `RealtimeConnection` that is returned by `beta.realtime.connect()`
+
+    This context manager ensures that the connection will be closed when it exits.
+
+    ---
+
+    Note that if your application doesn't work well with the context manager approach, then you
+    can call the `.enter()` method directly to initiate a connection.
+
+    **Warning**: You must remember to close the connection with `.close()`.
+
+    ```py
+    connection = client.beta.realtime.connect(...).enter()
+    # ...
+    connection.close()
+    ```
+    """
+
+    def __init__(
+        self,
+        *,
+        client: OpenAI,
+        model: str,
+        extra_query: Query,
+        extra_headers: Headers,
+        websocket_connection_options: WebsocketConnectionOptions,
+    ) -> None:
+        self.__client = client
+        self.__model = model
+        self.__connection: RealtimeConnection | None = None
+        self.__extra_query = extra_query
+        self.__extra_headers = extra_headers
+        self.__websocket_connection_options = websocket_connection_options
+
+    def __enter__(self) -> RealtimeConnection:
+        """
+        👋 If your application doesn't work well with the context manager approach, then you
+        can call this method directly to initiate a connection.
+
+        **Warning**: You must remember to close the connection with `.close()`.
+
+        ```py
+        connection = client.beta.realtime.connect(...).enter()
+        # ...
+        connection.close()
+        ```
+        """
+        try:
+            from websockets.sync.client import connect
+        except ImportError as exc:
+            raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc
+
+        extra_query = self.__extra_query
+        auth_headers = self.__client.auth_headers
+        if is_azure_client(self.__client):
+            url, auth_headers = self.__client._configure_realtime(self.__model, extra_query)
+        else:
+            url = self._prepare_url().copy_with(
+                params={
+                    **self.__client.base_url.params,
+                    "model": self.__model,
+                    **extra_query,
+                },
+            )
+        log.debug("Connecting to %s", url)
+        if self.__websocket_connection_options:
+            log.debug("Connection options: %s", self.__websocket_connection_options)
+
+        self.__connection = RealtimeConnection(
+            connect(
+                str(url),
+                user_agent_header=self.__client.user_agent,
+                additional_headers=_merge_mappings(
+                    {
+                        **auth_headers,
+                        "OpenAI-Beta": "realtime=v1",
+                    },
+                    self.__extra_headers,
+                ),
+                **self.__websocket_connection_options,
+            )
+        )
+
+        return self.__connection
+
+    enter = __enter__
+
+    def _prepare_url(self) -> httpx.URL:
+        if self.__client.websocket_base_url is not None:
+            base_url = httpx.URL(self.__client.websocket_base_url)
+        else:
+            base_url = self.__client._base_url.copy_with(scheme="wss")
+
+        merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
+        return base_url.copy_with(raw_path=merge_raw_path)
+
+    def __exit__(
+        self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
+    ) -> None:
+        if self.__connection is not None:
+            self.__connection.close()
+
+
+class BaseRealtimeConnectionResource:
+    def __init__(self, connection: RealtimeConnection) -> None:
+        self._connection = connection
+
+
+class RealtimeSessionResource(BaseRealtimeConnectionResource):
+    def update(self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """
+        Send this event to update the session’s default configuration.
+        The client may send this event at any time to update any field,
+        except for `voice`. However, note that once a session has been
+        initialized with a particular `model`, it can’t be changed to
+        another model using `session.update`.
+
+        When the server receives a `session.update`, it will respond
+        with a `session.updated` event showing the full, effective configuration.
+        Only the fields that are present are updated. To clear a field like
+        `instructions`, pass an empty string.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "session.update", "session": session, "event_id": event_id}),
+            )
+        )
+
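+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of `session.update`, assuming `connection` is an open
+# `RealtimeConnection` that exposes this resource as `connection.session` (wired up
+# elsewhere in this module). Only the provided fields are updated; per the docstring
+# above, passing an empty string clears a field such as `instructions`.
+def _example_session_update(connection: RealtimeConnection) -> None:  # hypothetical helper
+    connection.session.update(
+        session={
+            "instructions": "Answer in one short sentence.",
+            "turn_detection": {"type": "server_vad"},
+        },
+        event_id="evt_session_update_1",  # optional client-supplied correlation id
+    )
+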
+
+class RealtimeResponseResource(BaseRealtimeConnectionResource):
+    def create(
+        self,
+        *,
+        event_id: str | NotGiven = NOT_GIVEN,
+        response: response_create_event_param.Response | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        This event instructs the server to create a Response, which means triggering
+        model inference. When in Server VAD mode, the server will create Responses
+        automatically.
+
+        A Response will include at least one Item, and may have two, in which case
+        the second will be a function call. These Items will be appended to the
+        conversation history.
+
+        The server will respond with a `response.created` event, events for Items
+        and content created, and finally a `response.done` event to indicate the
+        Response is complete.
+
+        The `response.create` event includes inference configuration like
+        `instructions`, and `temperature`. These fields will override the Session's
+        configuration for this Response only.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
+            )
+        )
+
+    def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to cancel an in-progress response.
+
+        The server will respond
+        with a `response.cancelled` event or an error if there is no response to
+        cancel.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
+            )
+        )
+
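+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of triggering a response manually (useful when Server VAD is
+# disabled) and waiting for the matching `response.done` server event. It assumes
+# `connection` exposes this resource as `connection.response` and is iterable over
+# incoming server events, as defined elsewhere in this module.
+def _example_response_create(connection: RealtimeConnection) -> None:  # hypothetical helper
+    connection.response.create(response={"modalities": ["text"]})
+    for event in connection:
+        if event.type == "response.done":
+            break
+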
+
+class RealtimeInputAudioBufferResource(BaseRealtimeConnectionResource):
+    def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to clear the audio bytes in the buffer.
+
+        The server will
+        respond with an `input_audio_buffer.cleared` event.
+        """
+        self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
+        )
+
+    def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """
+        Send this event to commit the user input audio buffer, which will create a
+        new user message item in the conversation. This event will produce an error
+        if the input audio buffer is empty. When in Server VAD mode, the client does
+        not need to send this event, the server will commit the audio buffer
+        automatically.
+
+        Committing the input audio buffer will trigger input audio transcription
+        (if enabled in session configuration), but it will not create a response
+        from the model. The server will respond with an `input_audio_buffer.committed`
+        event.
+        """
+        self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
+        )
+
+    def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to append audio bytes to the input audio buffer.
+
+        The audio
+        buffer is temporary storage you can write to and later commit. In Server VAD
+        mode, the audio buffer is used to detect speech and the server will decide
+        when to commit. When Server VAD is disabled, you must commit the audio buffer
+        manually.
+
+        The client may choose how much audio to place in each event, up to a maximum
+        of 15 MiB; for example, streaming smaller chunks from the client may allow the
+        VAD to be more responsive. Unlike most other client events, the server will
+        not send a confirmation response to this event.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
+            )
+        )
+
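+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of streaming audio into the input buffer and committing it when
+# Server VAD is disabled. `audio` must be base64-encoded bytes in the session's
+# input audio format (e.g. 24 kHz, 16-bit mono PCM for `pcm16`). It assumes
+# `connection` exposes this resource as `connection.input_audio_buffer`.
+def _example_append_audio(connection: RealtimeConnection, pcm_chunks: list[bytes]) -> None:  # hypothetical helper
+    import base64
+
+    for chunk in pcm_chunks:
+        connection.input_audio_buffer.append(audio=base64.b64encode(chunk).decode("ascii"))
+    connection.input_audio_buffer.commit()
+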
+
+class RealtimeConversationResource(BaseRealtimeConnectionResource):
+    @cached_property
+    def item(self) -> RealtimeConversationItemResource:
+        return RealtimeConversationItemResource(self._connection)
+
+
+class RealtimeConversationItemResource(BaseRealtimeConnectionResource):
+    def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event when you want to remove any item from the conversation
+        history.
+
+        The server will respond with a `conversation.item.deleted` event,
+        unless the item does not exist in the conversation history, in which case the
+        server will respond with an error.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}),
+            )
+        )
+
+    def create(
+        self,
+        *,
+        item: ConversationItemParam,
+        event_id: str | NotGiven = NOT_GIVEN,
+        previous_item_id: str | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Add a new Item to the Conversation's context, including messages, function
+        calls, and function call responses. This event can be used both to populate a
+        "history" of the conversation and to add new items mid-stream, but has the
+        current limitation that it cannot populate assistant audio messages.
+
+        If successful, the server will respond with a `conversation.item.created`
+        event, otherwise an `error` event will be sent.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given(
+                    {
+                        "type": "conversation.item.create",
+                        "item": item,
+                        "event_id": event_id,
+                        "previous_item_id": previous_item_id,
+                    }
+                ),
+            )
+        )
+
+    def truncate(
+        self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN
+    ) -> None:
+        """Send this event to truncate a previous assistant message’s audio.
+
+        The server
+        will produce audio faster than realtime, so this event is useful when the user
+        interrupts to truncate audio that has already been sent to the client but not
+        yet played. This will synchronize the server's understanding of the audio with
+        the client's playback.
+
+        Truncating audio will delete the server-side text transcript to ensure there
+        is no text in the context that hasn't been heard by the user.
+
+        If successful, the server will respond with a `conversation.item.truncated`
+        event.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given(
+                    {
+                        "type": "conversation.item.truncate",
+                        "audio_end_ms": audio_end_ms,
+                        "content_index": content_index,
+                        "item_id": item_id,
+                        "event_id": event_id,
+                    }
+                ),
+            )
+        )
+
+    def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """
+        Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD.
+        The server will respond with a `conversation.item.retrieved` event,
+        unless the item does not exist in the conversation history, in which case the
+        server will respond with an error.
+        """
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "conversation.item.retrieve", "item_id": item_id, "event_id": event_id}),
+            )
+        )
+
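+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of seeding conversation history with a user message item and then
+# requesting a response. It assumes the nested resources are exposed as
+# `connection.conversation.item` and `connection.response`, and that the item payload
+# follows the Realtime `conversation.item.create` shape.
+def _example_conversation_item(connection: RealtimeConnection) -> None:  # hypothetical helper
+    connection.conversation.item.create(
+        item={
+            "type": "message",
+            "role": "user",
+            "content": [{"type": "input_text", "text": "What is the capital of France?"}],
+        }
+    )
+    connection.response.create()
+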
+
+class RealtimeTranscriptionSessionResource(BaseRealtimeConnectionResource):
+    def update(
+        self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
+    ) -> None:
+        """Send this event to update a transcription session."""
+        self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "transcription_session.update", "session": session, "event_id": event_id}),
+            )
+        )
+
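+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of updating a transcription-only session over an open realtime
+# connection, assuming the resource is exposed as `connection.transcription_session`
+# and that the payload follows the `transcription_session.update` shape.
+def _example_transcription_update(connection: RealtimeConnection) -> None:  # hypothetical helper
+    connection.transcription_session.update(
+        session={"input_audio_transcription": {"model": "gpt-4o-transcribe", "language": "en"}}
+    )
+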
+
+class BaseAsyncRealtimeConnectionResource:
+    def __init__(self, connection: AsyncRealtimeConnection) -> None:
+        self._connection = connection
+
+
+class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource):
+    async def update(
+        self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN
+    ) -> None:
+        """
+        Send this event to update the session’s default configuration.
+        The client may send this event at any time to update any field,
+        except for `voice`. However, note that once a session has been
+        initialized with a particular `model`, it can’t be changed to
+        another model using `session.update`.
+
+        When the server receives a `session.update`, it will respond
+        with a `session.updated` event showing the full, effective configuration.
+        Only the fields that are present are updated. To clear a field like
+        `instructions`, pass an empty string.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "session.update", "session": session, "event_id": event_id}),
+            )
+        )
+
+
+class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource):
+    async def create(
+        self,
+        *,
+        event_id: str | NotGiven = NOT_GIVEN,
+        response: response_create_event_param.Response | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        This event instructs the server to create a Response, which means triggering
+        model inference. When in Server VAD mode, the server will create Responses
+        automatically.
+
+        A Response will include at least one Item, and may have two, in which case
+        the second will be a function call. These Items will be appended to the
+        conversation history.
+
+        The server will respond with a `response.created` event, events for Items
+        and content created, and finally a `response.done` event to indicate the
+        Response is complete.
+
+        The `response.create` event includes inference configuration like
+        `instructions`, and `temperature`. These fields will override the Session's
+        configuration for this Response only.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
+            )
+        )
+
+    async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to cancel an in-progress response.
+
+        The server will respond
+        with a `response.cancelled` event or an error if there is no response to
+        cancel.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
+            )
+        )
+
+
+class AsyncRealtimeInputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
+    async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to clear the audio bytes in the buffer.
+
+        The server will
+        respond with an `input_audio_buffer.cleared` event.
+        """
+        await self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
+        )
+
+    async def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """
+        Send this event to commit the user input audio buffer, which will create a
+        new user message item in the conversation. This event will produce an error
+        if the input audio buffer is empty. When in Server VAD mode, the client does
+        not need to send this event, the server will commit the audio buffer
+        automatically.
+
+        Committing the input audio buffer will trigger input audio transcription
+        (if enabled in session configuration), but it will not create a response
+        from the model. The server will respond with an `input_audio_buffer.committed`
+        event.
+        """
+        await self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
+        )
+
+    async def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to append audio bytes to the input audio buffer.
+
+        The audio
+        buffer is temporary storage you can write to and later commit. In Server VAD
+        mode, the audio buffer is used to detect speech and the server will decide
+        when to commit. When Server VAD is disabled, you must commit the audio buffer
+        manually.
+
+        The client may choose how much audio to place in each event, up to a maximum
+        of 15 MiB; for example, streaming smaller chunks from the client may allow the
+        VAD to be more responsive. Unlike most other client events, the server will
+        not send a confirmation response to this event.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
+            )
+        )
+
+
+class AsyncRealtimeConversationResource(BaseAsyncRealtimeConnectionResource):
+    @cached_property
+    def item(self) -> AsyncRealtimeConversationItemResource:
+        return AsyncRealtimeConversationItemResource(self._connection)
+
+
+class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource):
+    async def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event when you want to remove any item from the conversation
+        history.
+
+        The server will respond with a `conversation.item.deleted` event,
+        unless the item does not exist in the conversation history, in which case the
+        server will respond with an error.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}),
+            )
+        )
+
+    async def create(
+        self,
+        *,
+        item: ConversationItemParam,
+        event_id: str | NotGiven = NOT_GIVEN,
+        previous_item_id: str | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Add a new Item to the Conversation's context, including messages, function
+        calls, and function call responses. This event can be used both to populate a
+        "history" of the conversation and to add new items mid-stream, but has the
+        current limitation that it cannot populate assistant audio messages.
+
+        If successful, the server will respond with a `conversation.item.created`
+        event, otherwise an `error` event will be sent.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given(
+                    {
+                        "type": "conversation.item.create",
+                        "item": item,
+                        "event_id": event_id,
+                        "previous_item_id": previous_item_id,
+                    }
+                ),
+            )
+        )
+
+    async def truncate(
+        self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN
+    ) -> None:
+        """Send this event to truncate a previous assistant message’s audio.
+
+        The server
+        will produce audio faster than realtime, so this event is useful when the user
+        interrupts to truncate audio that has already been sent to the client but not
+        yet played. This will synchronize the server's understanding of the audio with
+        the client's playback.
+
+        Truncating audio will delete the server-side text transcript to ensure there
+        is no text in the context that hasn't been heard by the user.
+
+        If successful, the server will respond with a `conversation.item.truncated`
+        event.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given(
+                    {
+                        "type": "conversation.item.truncate",
+                        "audio_end_ms": audio_end_ms,
+                        "content_index": content_index,
+                        "item_id": item_id,
+                        "event_id": event_id,
+                    }
+                ),
+            )
+        )
+
+    async def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """
+        Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD.
+        The server will respond with a `conversation.item.retrieved` event,
+        unless the item does not exist in the conversation history, in which case the
+        server will respond with an error.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "conversation.item.retrieve", "item_id": item_id, "event_id": event_id}),
+            )
+        )
+
+
+class AsyncRealtimeTranscriptionSessionResource(BaseAsyncRealtimeConnectionResource):
+    async def update(
+        self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
+    ) -> None:
+        """Send this event to update a transcription session."""
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "transcription_session.update", "session": session, "event_id": event_id}),
+            )
+        )
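+
+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged async counterpart to the sync examples above, assuming an `AsyncOpenAI`
+# client and that `AsyncRealtimeConnection` is async-iterable over server events.
+async def _example_async_usage() -> None:  # hypothetical helper, never invoked in this module
+    from openai import AsyncOpenAI
+
+    client = AsyncOpenAI()
+    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
+        await connection.session.update(session={"modalities": ["text"]})
+        await connection.response.create()
+        async for event in connection:
+            if event.type == "response.done":
+                break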
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/sessions.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/sessions.py
new file mode 100644
index 00000000..5884e54d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/sessions.py
@@ -0,0 +1,383 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+from ....types.beta.realtime import session_create_params
+from ....types.beta.realtime.session_create_response import SessionCreateResponse
+
+__all__ = ["Sessions", "AsyncSessions"]
+
+
+class Sessions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> SessionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return SessionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> SessionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return SessionsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
+        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
+        instructions: str | NotGiven = NOT_GIVEN,
+        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        model: Literal[
+            "gpt-4o-realtime-preview",
+            "gpt-4o-realtime-preview-2024-10-01",
+            "gpt-4o-realtime-preview-2024-12-17",
+            "gpt-4o-mini-realtime-preview",
+            "gpt-4o-mini-realtime-preview-2024-12-17",
+        ]
+        | NotGiven = NOT_GIVEN,
+        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        tool_choice: str | NotGiven = NOT_GIVEN,
+        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
+        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
+        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SessionCreateResponse:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API. Can be configured with the same session parameters as the
+        `session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
+
+          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
+              off. Noise reduction filters audio added to the input audio buffer before it is
+              sent to VAD and the model. Filtering the audio can improve VAD and turn
+              detection accuracy (reducing false positives) and model performance by improving
+              perception of the input audio.
+
+          input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
+              `null` to turn off once on. Input audio transcription is not native to the
+              model, since the model consumes audio directly. Transcription runs
+              asynchronously through
+              [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+              and should be treated as guidance of input audio content rather than precisely
+              what the model heard. The client can optionally set the language and prompt for
+              transcription; these offer additional guidance to the transcription service.
+
+          instructions: The default system instructions (i.e. system message) prepended to model calls.
+              This field allows the client to guide the model on desired responses. The model
+              can be instructed on response content and format (e.g. "be extremely succinct",
+              "act friendly", "here are examples of good responses") and on audio behavior
+              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+              instructions are not guaranteed to be followed by the model, but they provide
+              guidance to the model on the desired behavior.
+
+              Note that the server sets default instructions which will be used if this field
+              is not set and are visible in the `session.created` event at the start of the
+              session.
+
+          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
+              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+              `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          model: The Realtime model used for this session.
+
+          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              For `pcm16`, output audio is sampled at a rate of 24kHz.
+
+          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
+              temperature of 0.8 is highly recommended for best performance.
+
+          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+              a function.
+
+          tools: Tools (functions) available to the model.
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          voice: The voice the model uses to respond. Voice cannot be changed during the session
+              once the model has responded with audio at least once. Current voice options are
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            "/realtime/sessions",
+            body=maybe_transform(
+                {
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "instructions": instructions,
+                    "max_response_output_tokens": max_response_output_tokens,
+                    "modalities": modalities,
+                    "model": model,
+                    "output_audio_format": output_audio_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "turn_detection": turn_detection,
+                    "voice": voice,
+                },
+                session_create_params.SessionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=SessionCreateResponse,
+        )
+
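+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of minting an ephemeral client token for a browser client,
+# assuming the response exposes the token as `client_secret.value`, per the
+# docstring above.
+def _example_create_session_token() -> str:  # hypothetical helper
+    from openai import OpenAI
+
+    client = OpenAI()
+    session = client.beta.realtime.sessions.create(
+        model="gpt-4o-realtime-preview",
+        modalities=["audio", "text"],
+        voice="verse",
+    )
+    return session.client_secret.value
+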
+
+class AsyncSessions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncSessionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncSessionsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
+        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
+        instructions: str | NotGiven = NOT_GIVEN,
+        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        model: Literal[
+            "gpt-4o-realtime-preview",
+            "gpt-4o-realtime-preview-2024-10-01",
+            "gpt-4o-realtime-preview-2024-12-17",
+            "gpt-4o-mini-realtime-preview",
+            "gpt-4o-mini-realtime-preview-2024-12-17",
+        ]
+        | NotGiven = NOT_GIVEN,
+        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        tool_choice: str | NotGiven = NOT_GIVEN,
+        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
+        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
+        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SessionCreateResponse:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API. Can be configured with the same session parameters as the
+        `session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
+
+          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
+              off. Noise reduction filters audio added to the input audio buffer before it is
+              sent to VAD and the model. Filtering the audio can improve VAD and turn
+              detection accuracy (reducing false positives) and model performance by improving
+              perception of the input audio.
+
+          input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
+              `null` to turn off once on. Input audio transcription is not native to the
+              model, since the model consumes audio directly. Transcription runs
+              asynchronously through
+              [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+              and should be treated as guidance of input audio content rather than precisely
+              what the model heard. The client can optionally set the language and prompt for
+              transcription; these offer additional guidance to the transcription service.
+
+          instructions: The default system instructions (i.e. system message) prepended to model calls.
+              This field allows the client to guide the model on desired responses. The model
+              can be instructed on response content and format (e.g. "be extremely succinct",
+              "act friendly", "here are examples of good responses") and on audio behavior
+              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+              instructions are not guaranteed to be followed by the model, but they provide
+              guidance to the model on the desired behavior.
+
+              Note that the server sets default instructions which will be used if this field
+              is not set and are visible in the `session.created` event at the start of the
+              session.
+
+          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
+              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+              `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          model: The Realtime model used for this session.
+
+          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              For `pcm16`, output audio is sampled at a rate of 24kHz.
+
+          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
+              temperature of 0.8 is highly recommended for best performance.
+
+          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+              a function.
+
+          tools: Tools (functions) available to the model.
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          voice: The voice the model uses to respond. Voice cannot be changed during the session
+              once the model has responded with audio at least once. Current voice options are
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            "/realtime/sessions",
+            body=await async_maybe_transform(
+                {
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "instructions": instructions,
+                    "max_response_output_tokens": max_response_output_tokens,
+                    "modalities": modalities,
+                    "model": model,
+                    "output_audio_format": output_audio_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "turn_detection": turn_detection,
+                    "voice": voice,
+                },
+                session_create_params.SessionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=SessionCreateResponse,
+        )
+
+
+class SessionsWithRawResponse:
+    def __init__(self, sessions: Sessions) -> None:
+        self._sessions = sessions
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            sessions.create,
+        )
+
+
+class AsyncSessionsWithRawResponse:
+    def __init__(self, sessions: AsyncSessions) -> None:
+        self._sessions = sessions
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            sessions.create,
+        )
+
+
+class SessionsWithStreamingResponse:
+    def __init__(self, sessions: Sessions) -> None:
+        self._sessions = sessions
+
+        self.create = to_streamed_response_wrapper(
+            sessions.create,
+        )
+
+
+class AsyncSessionsWithStreamingResponse:
+    def __init__(self, sessions: AsyncSessions) -> None:
+        self._sessions = sessions
+
+        self.create = async_to_streamed_response_wrapper(
+            sessions.create,
+        )
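+
+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of the `.with_raw_response` wrapper defined above: the call is made
+# through the wrapper to expose HTTP metadata, and `.parse()` is assumed to return the
+# regular `SessionCreateResponse`.
+def _example_raw_response() -> None:  # hypothetical helper
+    from openai import OpenAI
+
+    client = OpenAI()
+    raw = client.beta.realtime.sessions.with_raw_response.create(model="gpt-4o-realtime-preview")
+    request_id = raw.headers.get("x-request-id")  # inspect response headers
+    session = raw.parse()  # parsed SessionCreateResponse
+    print(request_id, session.client_secret.value)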
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/transcription_sessions.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/transcription_sessions.py
new file mode 100644
index 00000000..0917da71
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/realtime/transcription_sessions.py
@@ -0,0 +1,277 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+from ....types.beta.realtime import transcription_session_create_params
+from ....types.beta.realtime.transcription_session import TranscriptionSession
+
+__all__ = ["TranscriptionSessions", "AsyncTranscriptionSessions"]
+
+
+class TranscriptionSessions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> TranscriptionSessionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return TranscriptionSessionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> TranscriptionSessionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return TranscriptionSessionsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        include: List[str] | NotGiven = NOT_GIVEN,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
+        | NotGiven = NOT_GIVEN,
+        input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranscriptionSession:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API specifically for realtime transcriptions. Can be configured with
+        the same session parameters as the `transcription_session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          include:
+              The set of items to include in the transcription. Currently available items are:
+
+              - `item.input_audio_transcription.logprobs`
+
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
+
+          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
+              off. Noise reduction filters audio added to the input audio buffer before it is
+              sent to VAD and the model. Filtering the audio can improve VAD and turn
+              detection accuracy (reducing false positives) and model performance by improving
+              perception of the input audio.
+
+          input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
+              language and prompt for transcription; these offer additional guidance to the
+              transcription service.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            "/realtime/transcription_sessions",
+            body=maybe_transform(
+                {
+                    "include": include,
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "modalities": modalities,
+                    "turn_detection": turn_detection,
+                },
+                transcription_session_create_params.TranscriptionSessionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=TranscriptionSession,
+        )
+
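+
+# --- Illustrative usage sketch (editor's note; not part of the generated module) ---
+# A hedged example of creating a transcription-only ephemeral session, assuming the
+# returned `TranscriptionSession` carries a `client_secret` usable by browser clients,
+# per the docstring above.
+def _example_create_transcription_session() -> None:  # hypothetical helper
+    from openai import OpenAI
+
+    client = OpenAI()
+    session = client.beta.realtime.transcription_sessions.create(
+        input_audio_format="pcm16",
+        input_audio_transcription={"model": "gpt-4o-transcribe", "language": "en"},
+        turn_detection={"type": "server_vad"},
+    )
+    print(session.client_secret.value)
+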
+
+class AsyncTranscriptionSessions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncTranscriptionSessionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncTranscriptionSessionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncTranscriptionSessionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncTranscriptionSessionsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        include: List[str] | NotGiven = NOT_GIVEN,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
+        | NotGiven = NOT_GIVEN,
+        input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TranscriptionSession:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API specifically for realtime transcriptions. Can be configured with
+        the same session parameters as the `transcription_session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          include:
+              The set of items to include in the transcription. Currently available items are:
+
+              - `item.input_audio_transcription.logprobs`
+
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
+
+          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
+              off. Noise reduction filters audio added to the input audio buffer before it is
+              sent to VAD and the model. Filtering the audio can improve VAD and turn
+              detection accuracy (reducing false positives) and model performance by improving
+              perception of the input audio.
+
+          input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
+              language and prompt for transcription; these offer additional guidance to the
+              transcription service.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            "/realtime/transcription_sessions",
+            body=await async_maybe_transform(
+                {
+                    "include": include,
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "modalities": modalities,
+                    "turn_detection": turn_detection,
+                },
+                transcription_session_create_params.TranscriptionSessionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=TranscriptionSession,
+        )
+
+
+class TranscriptionSessionsWithRawResponse:
+    def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
+        self._transcription_sessions = transcription_sessions
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            transcription_sessions.create,
+        )
+
+
+class AsyncTranscriptionSessionsWithRawResponse:
+    def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
+        self._transcription_sessions = transcription_sessions
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            transcription_sessions.create,
+        )
+
+
+class TranscriptionSessionsWithStreamingResponse:
+    def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
+        self._transcription_sessions = transcription_sessions
+
+        self.create = to_streamed_response_wrapper(
+            transcription_sessions.create,
+        )
+
+
+class AsyncTranscriptionSessionsWithStreamingResponse:
+    def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
+        self._transcription_sessions = transcription_sessions
+
+        self.create = async_to_streamed_response_wrapper(
+            transcription_sessions.create,
+        )
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/__init__.py
new file mode 100644
index 00000000..a66e445b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .runs import (
+    Runs,
+    AsyncRuns,
+    RunsWithRawResponse,
+    AsyncRunsWithRawResponse,
+    RunsWithStreamingResponse,
+    AsyncRunsWithStreamingResponse,
+)
+from .threads import (
+    Threads,
+    AsyncThreads,
+    ThreadsWithRawResponse,
+    AsyncThreadsWithRawResponse,
+    ThreadsWithStreamingResponse,
+    AsyncThreadsWithStreamingResponse,
+)
+from .messages import (
+    Messages,
+    AsyncMessages,
+    MessagesWithRawResponse,
+    AsyncMessagesWithRawResponse,
+    MessagesWithStreamingResponse,
+    AsyncMessagesWithStreamingResponse,
+)
+
+__all__ = [
+    "Runs",
+    "AsyncRuns",
+    "RunsWithRawResponse",
+    "AsyncRunsWithRawResponse",
+    "RunsWithStreamingResponse",
+    "AsyncRunsWithStreamingResponse",
+    "Messages",
+    "AsyncMessages",
+    "MessagesWithRawResponse",
+    "AsyncMessagesWithRawResponse",
+    "MessagesWithStreamingResponse",
+    "AsyncMessagesWithStreamingResponse",
+    "Threads",
+    "AsyncThreads",
+    "ThreadsWithRawResponse",
+    "AsyncThreadsWithRawResponse",
+    "ThreadsWithStreamingResponse",
+    "AsyncThreadsWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/messages.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/messages.py
new file mode 100644
index 00000000..e3374aba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/messages.py
@@ -0,0 +1,670 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import (
+    AsyncPaginator,
+    make_request_options,
+)
+from ....types.beta.threads import message_list_params, message_create_params, message_update_params
+from ....types.beta.threads.message import Message
+from ....types.shared_params.metadata import Metadata
+from ....types.beta.threads.message_deleted import MessageDeleted
+from ....types.beta.threads.message_content_part_param import MessageContentPartParam
+
+__all__ = ["Messages", "AsyncMessages"]
+
+
+class Messages(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> MessagesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return MessagesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> MessagesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return MessagesWithStreamingResponse(self)
+
+    def create(
+        self,
+        thread_id: str,
+        *,
+        content: Union[str, Iterable[MessageContentPartParam]],
+        role: Literal["user", "assistant"],
+        attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Message:
+        """
+        Create a message.
+
+        Args:
+          content: The text contents of the message.
+
+          role:
+              The role of the entity that is creating the message. Allowed values include:
+
+              - `user`: Indicates the message is sent by an actual user and should be used in
+                most cases to represent user-generated messages.
+              - `assistant`: Indicates the message is generated by the assistant. Use this
+                value to insert messages from the assistant into the conversation.
+
+          attachments: A list of files attached to the message, and the tools they should be added to.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/threads/{thread_id}/messages",
+            body=maybe_transform(
+                {
+                    "content": content,
+                    "role": role,
+                    "attachments": attachments,
+                    "metadata": metadata,
+                },
+                message_create_params.MessageCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Message,
+        )
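+
+    # Illustrative sketch, not part of the generated file: a typical call to the
+    # create() method above, assuming a configured OpenAI client and an existing
+    # thread ("thread_abc123" is a placeholder, not a real ID):
+    #
+    #     client.beta.threads.messages.create(
+    #         "thread_abc123",
+    #         role="user",
+    #         content="Summarize the attached report.",
+    #     )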
+
+    def retrieve(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Message:
+        """
+        Retrieve a message.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/threads/{thread_id}/messages/{message_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Message,
+        )
+
+    def update(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Message:
+        """
+        Modifies a message.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/threads/{thread_id}/messages/{message_id}",
+            body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Message,
+        )
+
+    def list(
+        self,
+        thread_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        run_id: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[Message]:
+        """
+        Returns a list of messages for a given thread.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          run_id: Filter messages by the run ID that generated them.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/threads/{thread_id}/messages",
+            page=SyncCursorPage[Message],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                        "run_id": run_id,
+                    },
+                    message_list_params.MessageListParams,
+                ),
+            ),
+            model=Message,
+        )
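+
+    # Illustrative sketch, not part of the generated file: list() returns a
+    # SyncCursorPage[Message], and iterating that page object keeps fetching
+    # further pages using the `after` cursor described above:
+    #
+    #     for message in client.beta.threads.messages.list("thread_abc123", limit=20):
+    #         print(message.id, message.role)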
+
+    def delete(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> MessageDeleted:
+        """
+        Deletes a message.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._delete(
+            f"/threads/{thread_id}/messages/{message_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=MessageDeleted,
+        )
+
+
+class AsyncMessages(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncMessagesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncMessagesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncMessagesWithStreamingResponse(self)
+
+    async def create(
+        self,
+        thread_id: str,
+        *,
+        content: Union[str, Iterable[MessageContentPartParam]],
+        role: Literal["user", "assistant"],
+        attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Message:
+        """
+        Create a message.
+
+        Args:
+          content: The text contents of the message.
+
+          role:
+              The role of the entity that is creating the message. Allowed values include:
+
+              - `user`: Indicates the message is sent by an actual user and should be used in
+                most cases to represent user-generated messages.
+              - `assistant`: Indicates the message is generated by the assistant. Use this
+                value to insert messages from the assistant into the conversation.
+
+          attachments: A list of files attached to the message, and the tools they should be added to.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/threads/{thread_id}/messages",
+            body=await async_maybe_transform(
+                {
+                    "content": content,
+                    "role": role,
+                    "attachments": attachments,
+                    "metadata": metadata,
+                },
+                message_create_params.MessageCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Message,
+        )
+
+    async def retrieve(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Message:
+        """
+        Retrieve a message.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/threads/{thread_id}/messages/{message_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Message,
+        )
+
+    async def update(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Message:
+        """
+        Modifies a message.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/threads/{thread_id}/messages/{message_id}",
+            body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Message,
+        )
+
+    def list(
+        self,
+        thread_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        run_id: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[Message, AsyncCursorPage[Message]]:
+        """
+        Returns a list of messages for a given thread.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          run_id: Filter messages by the run ID that generated them.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/threads/{thread_id}/messages",
+            page=AsyncCursorPage[Message],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                        "run_id": run_id,
+                    },
+                    message_list_params.MessageListParams,
+                ),
+            ),
+            model=Message,
+        )
+
+    async def delete(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> MessageDeleted:
+        """
+        Deletes a message.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._delete(
+            f"/threads/{thread_id}/messages/{message_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=MessageDeleted,
+        )
+
+
+class MessagesWithRawResponse:
+    def __init__(self, messages: Messages) -> None:
+        self._messages = messages
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            messages.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            messages.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            messages.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            messages.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            messages.delete,
+        )
+
+
+class AsyncMessagesWithRawResponse:
+    def __init__(self, messages: AsyncMessages) -> None:
+        self._messages = messages
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            messages.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            messages.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            messages.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            messages.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            messages.delete,
+        )
+
+
+class MessagesWithStreamingResponse:
+    def __init__(self, messages: Messages) -> None:
+        self._messages = messages
+
+        self.create = to_streamed_response_wrapper(
+            messages.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            messages.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            messages.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            messages.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            messages.delete,
+        )
+
+
+class AsyncMessagesWithStreamingResponse:
+    def __init__(self, messages: AsyncMessages) -> None:
+        self._messages = messages
+
+        self.create = async_to_streamed_response_wrapper(
+            messages.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            messages.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            messages.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            messages.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            messages.delete,
+        )
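AsyncMessages mirrors the synchronous resource defined earlier in this file, with every HTTP-calling method awaitable. A minimal sketch, assuming an `AsyncOpenAI` client and a placeholder thread ID:

    import asyncio
    from openai import AsyncOpenAI

    client = AsyncOpenAI()

    async def main() -> None:
        msg = await client.beta.threads.messages.create(
            "thread_abc123",
            role="user",
            content="Hello from the async client.",
        )
        # update/delete take the message ID first and the thread ID by keyword
        await client.beta.threads.messages.update(
            msg.id, thread_id="thread_abc123", metadata={"source": "example"}
        )
        await client.beta.threads.messages.delete(msg.id, thread_id="thread_abc123")

    asyncio.run(main())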
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/__init__.py
new file mode 100644
index 00000000..50aa9fae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .runs import (
+    Runs,
+    AsyncRuns,
+    RunsWithRawResponse,
+    AsyncRunsWithRawResponse,
+    RunsWithStreamingResponse,
+    AsyncRunsWithStreamingResponse,
+)
+from .steps import (
+    Steps,
+    AsyncSteps,
+    StepsWithRawResponse,
+    AsyncStepsWithRawResponse,
+    StepsWithStreamingResponse,
+    AsyncStepsWithStreamingResponse,
+)
+
+__all__ = [
+    "Steps",
+    "AsyncSteps",
+    "StepsWithRawResponse",
+    "AsyncStepsWithRawResponse",
+    "StepsWithStreamingResponse",
+    "AsyncStepsWithStreamingResponse",
+    "Runs",
+    "AsyncRuns",
+    "RunsWithRawResponse",
+    "AsyncRunsWithRawResponse",
+    "RunsWithStreamingResponse",
+    "AsyncRunsWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/runs.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/runs.py
new file mode 100644
index 00000000..acb1c9b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/runs.py
@@ -0,0 +1,2989 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import typing_extensions
+from typing import List, Union, Iterable, Optional
+from functools import partial
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ..... import _legacy_response
+from .steps import (
+    Steps,
+    AsyncSteps,
+    StepsWithRawResponse,
+    AsyncStepsWithRawResponse,
+    StepsWithStreamingResponse,
+    AsyncStepsWithStreamingResponse,
+)
+from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ....._utils import (
+    is_given,
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....._streaming import Stream, AsyncStream
+from .....pagination import SyncCursorPage, AsyncCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....lib.streaming import (
+    AssistantEventHandler,
+    AssistantEventHandlerT,
+    AssistantStreamManager,
+    AsyncAssistantEventHandler,
+    AsyncAssistantEventHandlerT,
+    AsyncAssistantStreamManager,
+)
+from .....types.beta.threads import (
+    run_list_params,
+    run_create_params,
+    run_update_params,
+    run_submit_tool_outputs_params,
+)
+from .....types.beta.threads.run import Run
+from .....types.shared.chat_model import ChatModel
+from .....types.shared_params.metadata import Metadata
+from .....types.shared.reasoning_effort import ReasoningEffort
+from .....types.beta.assistant_tool_param import AssistantToolParam
+from .....types.beta.assistant_stream_event import AssistantStreamEvent
+from .....types.beta.threads.runs.run_step_include import RunStepInclude
+from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
+from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = ["Runs", "AsyncRuns"]
+
+
+class Runs(SyncAPIResource):
+    @cached_property
+    def steps(self) -> Steps:
+        return Steps(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> RunsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return RunsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> RunsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return RunsWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Create a run.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
+              is useful for modifying the behavior on a per-run basis without overriding other
+              instructions.
+
+          additional_messages: Adds additional messages to the thread before creating the run.
+
+          instructions: Overrides the
+              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+              of the assistant. This is useful for modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        stream: Literal[True],
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[AssistantStreamEvent]:
+        """
+        Create a run.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
+              is useful for modifying the behavior on a per-run basis without overriding other
+              instructions.
+
+          additional_messages: Adds additional messages to the thread before creating the run.
+
+          instructions: Overrides the
+              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+              of the assistant. This is useful for modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        stream: bool,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | Stream[AssistantStreamEvent]:
+        """
+        Create a run.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
+              is useful for modifying the behavior on a per-run basis without overriding other
+              instructions.
+
+          additional_messages: Adds additional messages to the thread before creating the run.
+
+          instructions: Overrides the
+              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+              of the assistant. This is useful for modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["assistant_id"], ["assistant_id", "stream"])
+    def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | Stream[AssistantStreamEvent]:
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/threads/{thread_id}/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "additional_instructions": additional_instructions,
+                    "additional_messages": additional_messages,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation_strategy": truncation_strategy,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
+            ),
+            cast_to=Run,
+            stream=stream or False,
+            stream_cls=Stream[AssistantStreamEvent],
+        )
+
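+    # Usage sketch (hedged, illustrative only; IDs like "thread_abc123" and "asst_abc123"
+    # are placeholders): creating a run on an existing thread with this resource.
+    #
+    #     from openai import OpenAI
+    #
+    #     client = OpenAI()
+    #     run = client.beta.threads.runs.create(
+    #         thread_id="thread_abc123",
+    #         assistant_id="asst_abc123",
+    #         instructions="Answer concisely.",
+    #     )
+    #     print(run.id, run.status)
+    #
+    #     # Passing stream=True returns a Stream[AssistantStreamEvent] instead of a Run.
+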
+    def retrieve(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Retrieves a run.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/threads/{thread_id}/runs/{run_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+        )
+
+    def update(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Modifies a run.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/threads/{thread_id}/runs/{run_id}",
+            body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+        )
+
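+    # Illustrative sketch (placeholder IDs): fetching a run and attaching metadata to it.
+    #
+    #     run = client.beta.threads.runs.retrieve("run_abc123", thread_id="thread_abc123")
+    #     run = client.beta.threads.runs.update(
+    #         "run_abc123",
+    #         thread_id="thread_abc123",
+    #         metadata={"user_id": "user_123"},
+    #     )
+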
+    def list(
+        self,
+        thread_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[Run]:
+        """
+        Returns a list of runs belonging to a thread.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/threads/{thread_id}/runs",
+            page=SyncCursorPage[Run],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    run_list_params.RunListParams,
+                ),
+            ),
+            model=Run,
+        )
+
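+    # Illustrative sketch (placeholder ID): the returned SyncCursorPage[Run] is iterable and
+    # pages through results with the `after`/`before` cursors automatically.
+    #
+    #     for run in client.beta.threads.runs.list("thread_abc123", limit=20, order="desc"):
+    #         print(run.id, run.status)
+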
+    def cancel(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Cancels a run that is `in_progress`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/threads/{thread_id}/runs/{run_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+        )
+
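+    # Illustrative sketch (placeholder IDs): cancelling an in-progress run. Cancellation is
+    # handled server-side, so the returned run may still report a `cancelling` status.
+    #
+    #     run = client.beta.threads.runs.cancel("run_abc123", thread_id="thread_abc123")
+    #     print(run.status)
+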
+    def create_and_poll(
+        self,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to create a run and poll for a terminal state. More information on Run
+        lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        run = self.create(
+            thread_id=thread_id,
+            assistant_id=assistant_id,
+            include=include,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            instructions=instructions,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            metadata=metadata,
+            model=model,
+            response_format=response_format,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            parallel_tool_calls=parallel_tool_calls,
+            reasoning_effort=reasoning_effort,
+            # We assume we are not streaming when polling
+            stream=False,
+            tools=tools,
+            truncation_strategy=truncation_strategy,
+            top_p=top_p,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return self.poll(
+            run.id,
+            thread_id=thread_id,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            poll_interval_ms=poll_interval_ms,
+            timeout=timeout,
+        )
+
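+    # Illustrative sketch (placeholder IDs): create_and_poll blocks until the run reaches one
+    # of the terminal states checked in poll() below (requires_action, cancelled, completed,
+    # failed, expired, incomplete).
+    #
+    #     run = client.beta.threads.runs.create_and_poll(
+    #         thread_id="thread_abc123",
+    #         assistant_id="asst_abc123",
+    #     )
+    #     if run.status == "completed":
+    #         messages = client.beta.threads.messages.list(thread_id="thread_abc123")
+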
+    @overload
+    @typing_extensions.deprecated("use `stream` instead")
+    def create_and_stream(
+        self,
+        *,
+        assistant_id: str,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler]:
+        """Create a Run stream"""
+        ...
+
+    @overload
+    @typing_extensions.deprecated("use `stream` instead")
+    def create_and_stream(
+        self,
+        *,
+        assistant_id: str,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandlerT]:
+        """Create a Run stream"""
+        ...
+
+    @typing_extensions.deprecated("use `stream` instead")
+    def create_and_stream(
+        self,
+        *,
+        assistant_id: str,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
+        """Create a Run stream"""
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        make_request = partial(
+            self._post,
+            f"/threads/{thread_id}/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "additional_instructions": additional_instructions,
+                    "additional_messages": additional_messages,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "stream": True,
+                    "tools": tools,
+                    "truncation_strategy": truncation_strategy,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "reasoning_effort": reasoning_effort,
+                    "top_p": top_p,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=Stream[AssistantStreamEvent],
+        )
+        return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
+
+    def poll(
+        self,
+        run_id: str,
+        thread_id: str,
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to poll a run status until it reaches a terminal state. More
+        information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        extra_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})}
+
+        if is_given(poll_interval_ms):
+            extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
+        while True:
+            response = self.with_raw_response.retrieve(
+                thread_id=thread_id,
+                run_id=run_id,
+                extra_headers=extra_headers,
+                extra_body=extra_body,
+                extra_query=extra_query,
+                timeout=timeout,
+            )
+
+            run = response.parse()
+            # Return if we reached a terminal state
+            if run.status in terminal_states:
+                return run
+
+            if not is_given(poll_interval_ms):
+                from_header = response.headers.get("openai-poll-after-ms")
+                if from_header is not None:
+                    poll_interval_ms = int(from_header)
+                else:
+                    poll_interval_ms = 1000
+
+            self._sleep(poll_interval_ms / 1000)
+
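+    # Illustrative sketch (placeholder IDs): poll() can also be called directly on a run
+    # created elsewhere; it honours the server-suggested `openai-poll-after-ms` interval
+    # unless poll_interval_ms is given.
+    #
+    #     run = client.beta.threads.runs.poll("run_abc123", thread_id="thread_abc123")
+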
+    @overload
+    def stream(
+        self,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler]:
+        """Create a Run stream"""
+        ...
+
+    @overload
+    def stream(
+        self,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandlerT]:
+        """Create a Run stream"""
+        ...
+
+    def stream(
+        self,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
+        """Create a Run stream"""
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        make_request = partial(
+            self._post,
+            f"/threads/{thread_id}/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "additional_instructions": additional_instructions,
+                    "additional_messages": additional_messages,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "stream": True,
+                    "tools": tools,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "reasoning_effort": reasoning_effort,
+                    "truncation_strategy": truncation_strategy,
+                    "top_p": top_p,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=Stream[AssistantStreamEvent],
+        )
+        return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
+
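+    # Illustrative sketch (placeholder IDs): stream() returns an AssistantStreamManager used
+    # as a context manager; iterating the handler yields AssistantStreamEvent objects until
+    # the run finishes.
+    #
+    #     with client.beta.threads.runs.stream(
+    #         thread_id="thread_abc123",
+    #         assistant_id="asst_abc123",
+    #     ) as stream:
+    #         for event in stream:
+    #             if event.event == "thread.message.delta":
+    #                 ...
+    #         run = stream.get_final_run()
+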
+    @overload
+    def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        When a run has the `status: "requires_action"` and `required_action.type` is
+        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+        tool calls once they're all completed. All outputs must be submitted in a single
+        request.
+
+        Args:
+          tool_outputs: A list of tools for which the outputs are being submitted.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        stream: Literal[True],
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[AssistantStreamEvent]:
+        """
+        When a run has the `status: "requires_action"` and `required_action.type` is
+        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+        tool calls once they're all completed. All outputs must be submitted in a single
+        request.
+
+        Args:
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          tool_outputs: A list of tools for which the outputs are being submitted.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        stream: bool,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | Stream[AssistantStreamEvent]:
+        """
+        When a run has the `status: "requires_action"` and `required_action.type` is
+        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+        tool calls once they're all completed. All outputs must be submitted in a single
+        request.
+
+        Args:
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          tool_outputs: A list of tools for which the outputs are being submitted.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
+    def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | Stream[AssistantStreamEvent]:
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+            body=maybe_transform(
+                {
+                    "tool_outputs": tool_outputs,
+                    "stream": stream,
+                },
+                run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=stream or False,
+            stream_cls=Stream[AssistantStreamEvent],
+        )
+
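+    # Illustrative sketch (placeholder IDs): when a run is in `requires_action`, submit every
+    # tool output in a single call; the tool_call_id values come from run.required_action.
+    #
+    #     run = client.beta.threads.runs.submit_tool_outputs(
+    #         "run_abc123",
+    #         thread_id="thread_abc123",
+    #         tool_outputs=[{"tool_call_id": "call_abc123", "output": "22C"}],
+    #     )
+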
+    def submit_tool_outputs_and_poll(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to submit a tool output to a run and poll for a terminal run state.
+        More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        run = self.submit_tool_outputs(
+            run_id=run_id,
+            thread_id=thread_id,
+            tool_outputs=tool_outputs,
+            stream=False,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return self.poll(
+            run_id=run.id,
+            thread_id=thread_id,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+            poll_interval_ms=poll_interval_ms,
+        )
+
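+    # Illustrative sketch (placeholder IDs): the *_and_poll variant submits the outputs and
+    # waits for the next terminal state, which is convenient inside a requires_action loop.
+    #
+    #     run = client.beta.threads.runs.submit_tool_outputs_and_poll(
+    #         run_id="run_abc123",
+    #         thread_id="thread_abc123",
+    #         tool_outputs=[{"tool_call_id": "call_abc123", "output": "22C"}],
+    #     )
+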
+    @overload
+    def submit_tool_outputs_stream(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler]:
+        """
+        Submit the tool outputs from a previous run and stream the run to a terminal
+        state. More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        ...
+
+    @overload
+    def submit_tool_outputs_stream(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        event_handler: AssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandlerT]:
+        """
+        Submit the tool outputs from a previous run and stream the run to a terminal
+        state. More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        ...
+
+    def submit_tool_outputs_stream(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        event_handler: AssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
+        """
+        Submit the tool outputs from a previous run and stream the run to a terminal
+        state. More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        request = partial(
+            self._post,
+            f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+            body=maybe_transform(
+                {
+                    "tool_outputs": tool_outputs,
+                    "stream": True,
+                },
+                run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=Stream[AssistantStreamEvent],
+        )
+        return AssistantStreamManager(request, event_handler=event_handler or AssistantEventHandler())
+
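+    # Illustrative sketch (placeholder IDs): streaming the remainder of the run after
+    # submitting tool outputs, optionally with a custom AssistantEventHandler subclass.
+    #
+    #     with client.beta.threads.runs.submit_tool_outputs_stream(
+    #         run_id="run_abc123",
+    #         thread_id="thread_abc123",
+    #         tool_outputs=[{"tool_call_id": "call_abc123", "output": "22C"}],
+    #     ) as stream:
+    #         stream.until_done()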
+
+class AsyncRuns(AsyncAPIResource):
+    @cached_property
+    def steps(self) -> AsyncSteps:
+        return AsyncSteps(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncRunsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncRunsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncRunsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncRunsWithStreamingResponse(self)
+
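+    # Illustrative sketch (placeholder IDs): the async resource mirrors the synchronous Runs
+    # API; methods are awaited on an AsyncOpenAI client.
+    #
+    #     from openai import AsyncOpenAI
+    #
+    #     client = AsyncOpenAI()
+    #     run = await client.beta.threads.runs.create(
+    #         thread_id="thread_abc123", assistant_id="asst_abc123"
+    #     )
+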
+    @overload
+    async def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Create a run.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
+              is useful for modifying the behavior on a per-run basis without overriding other
+              instructions.
+
+          additional_messages: Adds additional messages to the thread before creating the run.
+
+          instructions: Overrides the
+              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+              of the assistant. This is useful for modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        stream: Literal[True],
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[AssistantStreamEvent]:
+        """
+        Create a run.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
+              is useful for modifying the behavior on a per-run basis without overriding other
+              instructions.
+
+          additional_messages: Adds additional messages to the thread before creating the run.
+
+          instructions: Overrides the
+              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+              of the assistant. This is useful for modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        stream: bool,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | AsyncStream[AssistantStreamEvent]:
+        """
+        Create a run.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
+              is useful for modifying the behavior on a per-run basis without overriding other
+              instructions.
+
+          additional_messages: Adds additional messages to the thread before creating the run.
+
+          instructions: Overrides the
+              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+              of the assistant. This is useful for modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["assistant_id"], ["assistant_id", "stream"])
+    async def create(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | AsyncStream[AssistantStreamEvent]:
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/threads/{thread_id}/runs",
+            body=await async_maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "additional_instructions": additional_instructions,
+                    "additional_messages": additional_messages,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation_strategy": truncation_strategy,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform({"include": include}, run_create_params.RunCreateParams),
+            ),
+            cast_to=Run,
+            stream=stream or False,
+            stream_cls=AsyncStream[AssistantStreamEvent],
+        )
+
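+    # Hedged usage sketch (illustrative only): assuming this resource is exposed as
+    # `client.beta.threads.runs` on an `AsyncOpenAI` client, and that `thread` and
+    # `assistant` are existing objects, a run might be created with:
+    #
+    #     run = await client.beta.threads.runs.create(
+    #         thread_id=thread.id,
+    #         assistant_id=assistant.id,
+    #     )
+    #
+    # With `stream=True` the call instead returns an AsyncStream of
+    # AssistantStreamEvent values that can be consumed with `async for`.
+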
+    async def retrieve(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Retrieves a run.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/threads/{thread_id}/runs/{run_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+        )
+
+    async def update(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Modifies a run.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/threads/{thread_id}/runs/{run_id}",
+            body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+        )
+
+    def list(
+        self,
+        thread_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[Run, AsyncCursorPage[Run]]:
+        """
+        Returns a list of runs belonging to a thread.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/threads/{thread_id}/runs",
+            page=AsyncCursorPage[Run],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    run_list_params.RunListParams,
+                ),
+            ),
+            model=Run,
+        )
+
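+    # Hedged usage sketch (illustrative only): the paginator returned by `list` can be
+    # consumed with `async for`, fetching subsequent pages as needed:
+    #
+    #     async for run in client.beta.threads.runs.list(thread_id=thread.id, limit=20):
+    #         print(run.id, run.status)
+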
+    async def cancel(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Cancels a run that is `in_progress`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/threads/{thread_id}/runs/{run_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+        )
+
+    async def create_and_poll(
+        self,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to create a run and poll for a terminal state. More information on Run
+        lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        run = await self.create(
+            thread_id=thread_id,
+            assistant_id=assistant_id,
+            include=include,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            instructions=instructions,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            metadata=metadata,
+            model=model,
+            response_format=response_format,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            parallel_tool_calls=parallel_tool_calls,
+            reasoning_effort=reasoning_effort,
+            # We assume we are not streaming when polling
+            stream=False,
+            tools=tools,
+            truncation_strategy=truncation_strategy,
+            top_p=top_p,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return await self.poll(
+            run.id,
+            thread_id=thread_id,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            poll_interval_ms=poll_interval_ms,
+            timeout=timeout,
+        )
+
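+    # Hedged usage sketch for the create-and-poll helper above (illustrative only,
+    # reusing the hypothetical `client`, `thread` and `assistant` names from earlier):
+    #
+    #     run = await client.beta.threads.runs.create_and_poll(
+    #         thread_id=thread.id,
+    #         assistant_id=assistant.id,
+    #     )
+    #     # `run.status` is terminal here: "completed", "requires_action", "failed",
+    #     # "cancelled", "expired" or "incomplete".
+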
+    @overload
+    @typing_extensions.deprecated("use `stream` instead")
+    def create_and_stream(
+        self,
+        *,
+        assistant_id: str,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
+        """Create a Run stream"""
+        ...
+
+    @overload
+    @typing_extensions.deprecated("use `stream` instead")
+    def create_and_stream(
+        self,
+        *,
+        assistant_id: str,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AsyncAssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
+        """Create a Run stream"""
+        ...
+
+    @typing_extensions.deprecated("use `stream` instead")
+    def create_and_stream(
+        self,
+        *,
+        assistant_id: str,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AsyncAssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> (
+        AsyncAssistantStreamManager[AsyncAssistantEventHandler]
+        | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
+    ):
+        """Create a Run stream"""
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        request = self._post(
+            f"/threads/{thread_id}/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "additional_instructions": additional_instructions,
+                    "additional_messages": additional_messages,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "stream": True,
+                    "tools": tools,
+                    "truncation_strategy": truncation_strategy,
+                    "top_p": top_p,
+                    "parallel_tool_calls": parallel_tool_calls,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=AsyncStream[AssistantStreamEvent],
+        )
+        return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
+
+    async def poll(
+        self,
+        run_id: str,
+        thread_id: str,
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to poll a run status until it reaches a terminal state. More
+        information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        extra_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})}
+
+        if is_given(poll_interval_ms):
+            extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
+        while True:
+            response = await self.with_raw_response.retrieve(
+                thread_id=thread_id,
+                run_id=run_id,
+                extra_headers=extra_headers,
+                extra_body=extra_body,
+                extra_query=extra_query,
+                timeout=timeout,
+            )
+
+            run = response.parse()
+            # Return if we reached a terminal state
+            if run.status in terminal_states:
+                return run
+
+            if not is_given(poll_interval_ms):
+                from_header = response.headers.get("openai-poll-after-ms")
+                if from_header is not None:
+                    poll_interval_ms = int(from_header)
+                else:
+                    poll_interval_ms = 1000
+
+            await self._sleep(poll_interval_ms / 1000)
+
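+    # Hedged usage sketch for `poll` (illustrative only): wait for an existing run to
+    # reach a terminal state, overriding the default/server-suggested poll interval:
+    #
+    #     run = await client.beta.threads.runs.poll(
+    #         run.id,
+    #         thread_id=thread.id,
+    #         poll_interval_ms=500,
+    #     )
+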
+    @overload
+    def stream(
+        self,
+        *,
+        assistant_id: str,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
+        """Create a Run stream"""
+        ...
+
+    @overload
+    def stream(
+        self,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AsyncAssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
+        """Create a Run stream"""
+        ...
+
+    def stream(
+        self,
+        *,
+        assistant_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        thread_id: str,
+        event_handler: AsyncAssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> (
+        AsyncAssistantStreamManager[AsyncAssistantEventHandler]
+        | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
+    ):
+        """Create a Run stream"""
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        request = self._post(
+            f"/threads/{thread_id}/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "additional_instructions": additional_instructions,
+                    "additional_messages": additional_messages,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "stream": True,
+                    "tools": tools,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "reasoning_effort": reasoning_effort,
+                    "truncation_strategy": truncation_strategy,
+                    "top_p": top_p,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=AsyncStream[AssistantStreamEvent],
+        )
+        return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
+
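+    # Hedged usage sketch for the streaming helper above (illustrative only). The
+    # returned AsyncAssistantStreamManager is intended to be used as an async context
+    # manager that yields an iterable stream of events:
+    #
+    #     async with client.beta.threads.runs.stream(
+    #         thread_id=thread.id,
+    #         assistant_id=assistant.id,
+    #     ) as stream:
+    #         async for event in stream:
+    #             ...
+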
+    @overload
+    async def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        When a run has the `status: "requires_action"` and `required_action.type` is
+        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+        tool calls once they're all completed. All outputs must be submitted in a single
+        request.
+
+        Args:
+          tool_outputs: A list of tools for which the outputs are being submitted.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        stream: Literal[True],
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[AssistantStreamEvent]:
+        """
+        When a run has the `status: "requires_action"` and `required_action.type` is
+        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+        tool calls once they're all completed. All outputs must be submitted in a single
+        request.
+
+        Args:
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          tool_outputs: A list of tools for which the outputs are being submitted.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        stream: bool,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | AsyncStream[AssistantStreamEvent]:
+        """
+        When a run has the `status: "requires_action"` and `required_action.type` is
+        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+        tool calls once they're all completed. All outputs must be submitted in a single
+        request.
+
+        Args:
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          tool_outputs: A list of tools for which the outputs are being submitted.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
+    async def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | AsyncStream[AssistantStreamEvent]:
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+            body=await async_maybe_transform(
+                {
+                    "tool_outputs": tool_outputs,
+                    "stream": stream,
+                },
+                run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=stream or False,
+            stream_cls=AsyncStream[AssistantStreamEvent],
+        )
+
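+    # Hedged usage sketch for `submit_tool_outputs` (illustrative only): when a run is
+    # in `requires_action`, all pending tool outputs are submitted in one request; the
+    # `tool_call_id`/`output` keys below follow the ToolOutput param shape:
+    #
+    #     run = await client.beta.threads.runs.submit_tool_outputs(
+    #         run.id,
+    #         thread_id=thread.id,
+    #         tool_outputs=[{"tool_call_id": call.id, "output": "42"}],
+    #     )
+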
+    async def submit_tool_outputs_and_poll(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to submit a tool output to a run and poll for a terminal run state.
+        More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        run = await self.submit_tool_outputs(
+            run_id=run_id,
+            thread_id=thread_id,
+            tool_outputs=tool_outputs,
+            stream=False,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return await self.poll(
+            run_id=run.id,
+            thread_id=thread_id,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+            poll_interval_ms=poll_interval_ms,
+        )
+
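A minimal usage sketch for the `submit_tool_outputs_and_poll` helper above. It assumes an `AsyncOpenAI` client configured via the `OPENAI_API_KEY` environment variable; the thread, run, and tool-call IDs are placeholders for a run currently in the `requires_action` state:

    import asyncio
    from openai import AsyncOpenAI

    client = AsyncOpenAI()

    async def main() -> None:
        # Placeholder IDs; the run must have status "requires_action".
        run = await client.beta.threads.runs.submit_tool_outputs_and_poll(
            thread_id="thread_abc",
            run_id="run_abc",
            tool_outputs=[{"tool_call_id": "call_abc", "output": "70F and sunny"}],
        )
        print(run.status)  # a terminal state, e.g. "completed"

    asyncio.run(main())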
+    @overload
+    def submit_tool_outputs_stream(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
+        """
+        Submit the tool outputs from a previous run and stream the run to a terminal
+        state. More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        ...
+
+    @overload
+    def submit_tool_outputs_stream(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        event_handler: AsyncAssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
+        """
+        Submit the tool outputs from a previous run and stream the run to a terminal
+        state. More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        ...
+
+    def submit_tool_outputs_stream(
+        self,
+        *,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        run_id: str,
+        thread_id: str,
+        event_handler: AsyncAssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> (
+        AsyncAssistantStreamManager[AsyncAssistantEventHandler]
+        | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
+    ):
+        """
+        Submit the tool outputs from a previous run and stream the run to a terminal
+        state. More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+        """
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        request = self._post(
+            f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+            body=maybe_transform(
+                {
+                    "tool_outputs": tool_outputs,
+                    "stream": True,
+                },
+                run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=AsyncStream[AssistantStreamEvent],
+        )
+        return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
+
+
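A minimal sketch of the streaming variant above: `submit_tool_outputs_stream` returns an `AsyncAssistantStreamManager`, which is used as an async context manager and iterated for `AssistantStreamEvent` objects. The IDs below are placeholders:

    import asyncio
    from openai import AsyncOpenAI

    client = AsyncOpenAI()

    async def main() -> None:
        # Placeholder IDs; the run must be waiting on tool outputs.
        async with client.beta.threads.runs.submit_tool_outputs_stream(
            thread_id="thread_abc",
            run_id="run_abc",
            tool_outputs=[{"tool_call_id": "call_abc", "output": "42"}],
        ) as stream:
            async for event in stream:
                print(event.event)

    asyncio.run(main())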
+class RunsWithRawResponse:
+    def __init__(self, runs: Runs) -> None:
+        self._runs = runs
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            runs.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            runs.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            runs.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            runs.list,
+        )
+        self.cancel = _legacy_response.to_raw_response_wrapper(
+            runs.cancel,
+        )
+        self.submit_tool_outputs = _legacy_response.to_raw_response_wrapper(
+            runs.submit_tool_outputs,
+        )
+
+    @cached_property
+    def steps(self) -> StepsWithRawResponse:
+        return StepsWithRawResponse(self._runs.steps)
+
+
+class AsyncRunsWithRawResponse:
+    def __init__(self, runs: AsyncRuns) -> None:
+        self._runs = runs
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            runs.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            runs.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            runs.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            runs.list,
+        )
+        self.cancel = _legacy_response.async_to_raw_response_wrapper(
+            runs.cancel,
+        )
+        self.submit_tool_outputs = _legacy_response.async_to_raw_response_wrapper(
+            runs.submit_tool_outputs,
+        )
+
+    @cached_property
+    def steps(self) -> AsyncStepsWithRawResponse:
+        return AsyncStepsWithRawResponse(self._runs.steps)
+
+
+class RunsWithStreamingResponse:
+    def __init__(self, runs: Runs) -> None:
+        self._runs = runs
+
+        self.create = to_streamed_response_wrapper(
+            runs.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            runs.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            runs.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            runs.list,
+        )
+        self.cancel = to_streamed_response_wrapper(
+            runs.cancel,
+        )
+        self.submit_tool_outputs = to_streamed_response_wrapper(
+            runs.submit_tool_outputs,
+        )
+
+    @cached_property
+    def steps(self) -> StepsWithStreamingResponse:
+        return StepsWithStreamingResponse(self._runs.steps)
+
+
+class AsyncRunsWithStreamingResponse:
+    def __init__(self, runs: AsyncRuns) -> None:
+        self._runs = runs
+
+        self.create = async_to_streamed_response_wrapper(
+            runs.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            runs.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            runs.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            runs.list,
+        )
+        self.cancel = async_to_streamed_response_wrapper(
+            runs.cancel,
+        )
+        self.submit_tool_outputs = async_to_streamed_response_wrapper(
+            runs.submit_tool_outputs,
+        )
+
+    @cached_property
+    def steps(self) -> AsyncStepsWithStreamingResponse:
+        return AsyncStepsWithStreamingResponse(self._runs.steps)
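The `*WithRawResponse` wrappers defined above return the HTTP response alongside the parsed model. A minimal sketch with the synchronous client and placeholder IDs:

    from openai import OpenAI

    client = OpenAI()

    # Placeholder IDs; the wrapper exposes headers plus a .parse() helper.
    response = client.beta.threads.runs.with_raw_response.retrieve(
        "run_abc",
        thread_id="thread_abc",
    )
    print(response.headers.get("x-request-id"))
    run = response.parse()
    print(run.status)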
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/steps.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/steps.py
new file mode 100644
index 00000000..709c729d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/runs/steps.py
@@ -0,0 +1,381 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ....._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncCursorPage, AsyncCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.beta.threads.runs import step_list_params, step_retrieve_params
+from .....types.beta.threads.runs.run_step import RunStep
+from .....types.beta.threads.runs.run_step_include import RunStepInclude
+
+__all__ = ["Steps", "AsyncSteps"]
+
+
+class Steps(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> StepsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return StepsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> StepsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return StepsWithStreamingResponse(self)
+
+    def retrieve(
+        self,
+        step_id: str,
+        *,
+        thread_id: str,
+        run_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RunStep:
+        """
+        Retrieves a run step.
+
+        Args:
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        if not step_id:
+            raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
+            ),
+            cast_to=RunStep,
+        )
+
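A minimal sketch of `Steps.retrieve` with the `include` parameter, using placeholder IDs:

    from openai import OpenAI

    client = OpenAI()

    # Placeholder IDs; `include` inlines file-search result content in the step details.
    step = client.beta.threads.runs.steps.retrieve(
        "step_abc",
        thread_id="thread_abc",
        run_id="run_abc",
        include=["step_details.tool_calls[*].file_search.results[*].content"],
    )
    print(step.type, step.status)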
+    def list(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[RunStep]:
+        """
+        Returns a list of run steps belonging to a run.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/threads/{thread_id}/runs/{run_id}/steps",
+            page=SyncCursorPage[RunStep],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "include": include,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    step_list_params.StepListParams,
+                ),
+            ),
+            model=RunStep,
+        )
+
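The `SyncCursorPage` returned by `Steps.list` can be iterated directly; additional pages are fetched on demand. A minimal sketch with placeholder IDs:

    from openai import OpenAI

    client = OpenAI()

    # Placeholder IDs; iteration transparently follows the cursor pagination.
    for step in client.beta.threads.runs.steps.list(
        "run_abc",
        thread_id="thread_abc",
        limit=20,
        order="asc",
    ):
        print(step.id, step.status)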
+
+class AsyncSteps(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncStepsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncStepsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncStepsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncStepsWithStreamingResponse(self)
+
+    async def retrieve(
+        self,
+        step_id: str,
+        *,
+        thread_id: str,
+        run_id: str,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RunStep:
+        """
+        Retrieves a run step.
+
+        Args:
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        if not step_id:
+            raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
+            ),
+            cast_to=RunStep,
+        )
+
+    def list(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[RunStep, AsyncCursorPage[RunStep]]:
+        """
+        Returns a list of run steps belonging to a run.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          include: A list of additional fields to include in the response. Currently the only
+              supported value is `step_details.tool_calls[*].file_search.results[*].content`
+              to fetch the file search result content.
+
+              See the
+              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+              for more information.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/threads/{thread_id}/runs/{run_id}/steps",
+            page=AsyncCursorPage[RunStep],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "include": include,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    step_list_params.StepListParams,
+                ),
+            ),
+            model=RunStep,
+        )
+
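The async variant returns an `AsyncPaginator`, which can be consumed with `async for`. A minimal sketch with placeholder IDs:

    import asyncio
    from openai import AsyncOpenAI

    client = AsyncOpenAI()

    async def main() -> None:
        # Placeholder IDs; pages are fetched lazily as the loop advances.
        async for step in client.beta.threads.runs.steps.list(
            "run_abc",
            thread_id="thread_abc",
        ):
            print(step.id)

    asyncio.run(main())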
+
+class StepsWithRawResponse:
+    def __init__(self, steps: Steps) -> None:
+        self._steps = steps
+
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            steps.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            steps.list,
+        )
+
+
+class AsyncStepsWithRawResponse:
+    def __init__(self, steps: AsyncSteps) -> None:
+        self._steps = steps
+
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            steps.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            steps.list,
+        )
+
+
+class StepsWithStreamingResponse:
+    def __init__(self, steps: Steps) -> None:
+        self._steps = steps
+
+        self.retrieve = to_streamed_response_wrapper(
+            steps.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            steps.list,
+        )
+
+
+class AsyncStepsWithStreamingResponse:
+    def __init__(self, steps: AsyncSteps) -> None:
+        self._steps = steps
+
+        self.retrieve = async_to_streamed_response_wrapper(
+            steps.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            steps.list,
+        )
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/threads.py b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/threads.py
new file mode 100644
index 00000000..d88559bd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/beta/threads/threads.py
@@ -0,0 +1,1875 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from functools import partial
+from typing_extensions import Literal, overload
+
+import httpx
+
+from .... import _legacy_response
+from .messages import (
+    Messages,
+    AsyncMessages,
+    MessagesWithRawResponse,
+    AsyncMessagesWithRawResponse,
+    MessagesWithStreamingResponse,
+    AsyncMessagesWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from .runs.runs import (
+    Runs,
+    AsyncRuns,
+    RunsWithRawResponse,
+    AsyncRunsWithRawResponse,
+    RunsWithStreamingResponse,
+    AsyncRunsWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._streaming import Stream, AsyncStream
+from ....types.beta import (
+    thread_create_params,
+    thread_update_params,
+    thread_create_and_run_params,
+)
+from ...._base_client import make_request_options
+from ....lib.streaming import (
+    AssistantEventHandler,
+    AssistantEventHandlerT,
+    AssistantStreamManager,
+    AsyncAssistantEventHandler,
+    AsyncAssistantEventHandlerT,
+    AsyncAssistantStreamManager,
+)
+from ....types.beta.thread import Thread
+from ....types.beta.threads.run import Run
+from ....types.shared.chat_model import ChatModel
+from ....types.beta.thread_deleted import ThreadDeleted
+from ....types.shared_params.metadata import Metadata
+from ....types.beta.assistant_stream_event import AssistantStreamEvent
+from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
+from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = ["Threads", "AsyncThreads"]
+
+
+class Threads(SyncAPIResource):
+    @cached_property
+    def runs(self) -> Runs:
+        return Runs(self._client)
+
+    @cached_property
+    def messages(self) -> Messages:
+        return Messages(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ThreadsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ThreadsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ThreadsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ThreadsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Thread:
+        """
+        Create a thread.
+
+        Args:
+          messages: A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+              start the thread with.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          tool_resources: A set of resources that are made available to the assistant's tools in this
+              thread. The resources are specific to the type of tool. For example, the
+              `code_interpreter` tool requires a list of file IDs, while the `file_search`
+              tool requires a list of vector store IDs.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            "/threads",
+            body=maybe_transform(
+                {
+                    "messages": messages,
+                    "metadata": metadata,
+                    "tool_resources": tool_resources,
+                },
+                thread_create_params.ThreadCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Thread,
+        )
+
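A minimal sketch of `Threads.create` above, seeding the new thread with one user message and an illustrative metadata entry:

    from openai import OpenAI

    client = OpenAI()

    thread = client.beta.threads.create(
        messages=[{"role": "user", "content": "What is in my uploaded report?"}],
        metadata={"source": "docs-example"},  # illustrative key/value pair
    )
    print(thread.id)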
+    def retrieve(
+        self,
+        thread_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Thread:
+        """
+        Retrieves a thread.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/threads/{thread_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Thread,
+        )
+
+    def update(
+        self,
+        thread_id: str,
+        *,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Thread:
+        """
+        Modifies a thread.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          tool_resources: A set of resources that are made available to the assistant's tools in this
+              thread. The resources are specific to the type of tool. For example, the
+              `code_interpreter` tool requires a list of file IDs, while the `file_search`
+              tool requires a list of vector store IDs.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/threads/{thread_id}",
+            body=maybe_transform(
+                {
+                    "metadata": metadata,
+                    "tool_resources": tool_resources,
+                },
+                thread_update_params.ThreadUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Thread,
+        )
+
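A minimal sketch of `Threads.update`, replacing the thread's metadata; `thread_abc` is a placeholder ID:

    from openai import OpenAI

    client = OpenAI()

    thread = client.beta.threads.update(
        "thread_abc",  # placeholder thread ID
        metadata={"modified": "true", "user": "user_123456"},
    )
    print(thread.metadata)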
+    def delete(
+        self,
+        thread_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ThreadDeleted:
+        """
+        Delete a thread.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._delete(
+            f"/threads/{thread_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ThreadDeleted,
+        )
+
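A minimal sketch combining `retrieve` and `delete`; the thread ID is a placeholder:

    from openai import OpenAI

    client = OpenAI()

    thread = client.beta.threads.retrieve("thread_abc")  # placeholder ID
    deleted = client.beta.threads.delete(thread.id)
    print(deleted.deleted)  # True once the thread is removed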
+    @overload
+    def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Create a thread and run it in one request.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          instructions: Override the default system message of the assistant. This is useful for
+              modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          thread: Options to create a new thread. If no thread is provided when running a request,
+              an empty thread will be created.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
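A minimal sketch of the non-streaming `create_and_run` overload above; `asst_abc` is a placeholder assistant ID, and the returned Run may still be queued or in progress when the call returns:

    from openai import OpenAI

    client = OpenAI()

    run = client.beta.threads.create_and_run(
        assistant_id="asst_abc",  # placeholder assistant ID
        thread={"messages": [{"role": "user", "content": "Summarize our Q3 numbers."}]},
    )
    print(run.id, run.status)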
+    @overload
+    def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        stream: Literal[True],
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[AssistantStreamEvent]:
+        """
+        Create a thread and run it in one request.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          instructions: Override the default system message of the assistant. This is useful for
+              modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          thread: Options to create a new thread. If no thread is provided when running a request,
+              an empty thread will be created.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
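A minimal sketch of the `stream=True` overload above, which yields `AssistantStreamEvent` objects as server-sent events arrive; `asst_abc` is a placeholder assistant ID:

    from openai import OpenAI

    client = OpenAI()

    stream = client.beta.threads.create_and_run(
        assistant_id="asst_abc",  # placeholder assistant ID
        stream=True,
        thread={"messages": [{"role": "user", "content": "Write a haiku about threads."}]},
    )
    for event in stream:
        print(event.event)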
+    @overload
+    def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        stream: bool,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | Stream[AssistantStreamEvent]:
+        """
+        Create a thread and run it in one request.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          instructions: Override the default system message of the assistant. This is useful for
+              modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          thread: Options to create a new thread. If no thread is provided when running a request,
+              an empty thread will be created.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
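+
+          Example (illustrative sketch; assumes a configured `OpenAI` client named
+              `client` and a valid assistant ID):
+
+              stream = client.beta.threads.create_and_run(
+                  assistant_id="asst_123",
+                  thread={"messages": [{"role": "user", "content": "Hello"}]},
+                  stream=True,
+              )
+              for event in stream:
+                  print(event.event)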
+        """
+        ...
+
+    @required_args(["assistant_id"], ["assistant_id", "stream"])
+    def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | Stream[AssistantStreamEvent]:
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            "/threads/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "response_format": response_format,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "thread": thread,
+                    "tool_choice": tool_choice,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation_strategy": truncation_strategy,
+                },
+                thread_create_and_run_params.ThreadCreateAndRunParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=stream or False,
+            stream_cls=Stream[AssistantStreamEvent],
+        )
+
+    def create_and_run_poll(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to create a thread, start a run and then poll for a terminal state.
+        More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
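+
+        Example (illustrative sketch; assumes a configured `OpenAI` client named
+        `client` and a valid assistant ID):
+
+            run = client.beta.threads.create_and_run_poll(
+                assistant_id="asst_123",
+                thread={"messages": [{"role": "user", "content": "Hello"}]},
+            )
+            print(run.status)  # a terminal state, e.g. "completed"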
+        """
+        run = self.create_and_run(
+            assistant_id=assistant_id,
+            instructions=instructions,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            metadata=metadata,
+            model=model,
+            parallel_tool_calls=parallel_tool_calls,
+            response_format=response_format,
+            temperature=temperature,
+            stream=False,
+            thread=thread,
+            tool_resources=tool_resources,
+            tool_choice=tool_choice,
+            truncation_strategy=truncation_strategy,
+            top_p=top_p,
+            tools=tools,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return self.runs.poll(run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms)
+
+    @overload
+    def create_and_run_stream(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler]:
+        """Create a thread and stream the run back"""
+        ...
+
+    @overload
+    def create_and_run_stream(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        event_handler: AssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandlerT]:
+        """Create a thread and stream the run back"""
+        ...
+
+    def create_and_run_stream(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        event_handler: AssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
+        """Create a thread and stream the run back"""
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.create_and_run_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        make_request = partial(
+            self._post,
+            "/threads/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "stream": True,
+                    "thread": thread,
+                    "tools": tools,
+                    "tool_resources": tool_resources,
+                    "truncation_strategy": truncation_strategy,
+                    "top_p": top_p,
+                },
+                thread_create_and_run_params.ThreadCreateAndRunParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=Stream[AssistantStreamEvent],
+        )
+        return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
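+
+    # Illustrative usage sketch (assumes a configured `OpenAI` client named
+    # `client` and a valid assistant ID):
+    #
+    #     with client.beta.threads.create_and_run_stream(
+    #         assistant_id="asst_123",
+    #         thread={"messages": [{"role": "user", "content": "Hello"}]},
+    #     ) as stream:
+    #         stream.until_done()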
+
+
+class AsyncThreads(AsyncAPIResource):
+    @cached_property
+    def runs(self) -> AsyncRuns:
+        return AsyncRuns(self._client)
+
+    @cached_property
+    def messages(self) -> AsyncMessages:
+        return AsyncMessages(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncThreadsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncThreadsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncThreadsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Thread:
+        """
+        Create a thread.
+
+        Args:
+          messages: A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+              start the thread with.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          tool_resources: A set of resources that are made available to the assistant's tools in this
+              thread. The resources are specific to the type of tool. For example, the
+              `code_interpreter` tool requires a list of file IDs, while the `file_search`
+              tool requires a list of vector store IDs.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
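+
+          Example (illustrative sketch; assumes a configured `AsyncOpenAI` client
+              named `client`):
+
+              thread = await client.beta.threads.create(
+                  messages=[{"role": "user", "content": "Hello"}],
+              )
+              print(thread.id)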
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            "/threads",
+            body=await async_maybe_transform(
+                {
+                    "messages": messages,
+                    "metadata": metadata,
+                    "tool_resources": tool_resources,
+                },
+                thread_create_params.ThreadCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Thread,
+        )
+
+    async def retrieve(
+        self,
+        thread_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Thread:
+        """
+        Retrieves a thread.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/threads/{thread_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Thread,
+        )
+
+    async def update(
+        self,
+        thread_id: str,
+        *,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Thread:
+        """
+        Modifies a thread.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          tool_resources: A set of resources that are made available to the assistant's tools in this
+              thread. The resources are specific to the type of tool. For example, the
+              `code_interpreter` tool requires a list of file IDs, while the `file_search`
+              tool requires a list of vector store IDs.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/threads/{thread_id}",
+            body=await async_maybe_transform(
+                {
+                    "metadata": metadata,
+                    "tool_resources": tool_resources,
+                },
+                thread_update_params.ThreadUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Thread,
+        )
+
+    async def delete(
+        self,
+        thread_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ThreadDeleted:
+        """
+        Delete a thread.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._delete(
+            f"/threads/{thread_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ThreadDeleted,
+        )
+
+    @overload
+    async def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        Create a thread and run it in one request.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          instructions: Override the default system message of the assistant. This is useful for
+              modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          thread: Options to create a new thread. If no thread is provided when running a request,
+              an empty thread will be created.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        stream: Literal[True],
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[AssistantStreamEvent]:
+        """
+        Create a thread and run it in one request.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          instructions: Override the default system message of the assistant. This is useful for
+              modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          thread: Options to create a new thread. If no thread is provided when running a request,
+              an empty thread will be created.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
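+
+          Example (illustrative sketch; assumes a configured `AsyncOpenAI` client
+              named `client` and a valid assistant ID):
+
+              stream = await client.beta.threads.create_and_run(
+                  assistant_id="asst_123",
+                  thread={"messages": [{"role": "user", "content": "Hello"}]},
+                  stream=True,
+              )
+              async for event in stream:
+                  print(event.event)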
+        """
+        ...
+
+    @overload
+    async def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        stream: bool,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | AsyncStream[AssistantStreamEvent]:
+        """
+        Create a thread and run it in one request.
+
+        Args:
+          assistant_id: The ID of the
+              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+              execute this run.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          instructions: Override the default system message of the assistant. This is useful for
+              modifying the behavior on a per-run basis.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              completion tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
+              The run will make a best effort to use only the number of prompt tokens
+              specified, across multiple turns of the run. If the run exceeds the number of
+              prompt tokens specified, the run will end with status `incomplete`. See
+              `incomplete_details` for more info.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+              be used to execute this run. If a value is provided here, it will override the
+              model associated with the assistant. If not, the model associated with the
+              assistant will be used.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          response_format: Specifies the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          thread: Options to create a new thread. If no thread is provided when running a request,
+              an empty thread will be created.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["assistant_id"], ["assistant_id", "stream"])
+    async def create_and_run(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run | AsyncStream[AssistantStreamEvent]:
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            "/threads/runs",
+            body=await async_maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "response_format": response_format,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "thread": thread,
+                    "tool_choice": tool_choice,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation_strategy": truncation_strategy,
+                },
+                thread_create_and_run_params.ThreadCreateAndRunParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=stream or False,
+            stream_cls=AsyncStream[AssistantStreamEvent],
+        )
+
+    async def create_and_run_poll(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Run:
+        """
+        A helper to create a thread, start a run and then poll for a terminal state.
+        More information on Run lifecycles can be found here:
+        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
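+
+        Example (illustrative sketch; assumes a configured `AsyncOpenAI` client
+        named `client` and a valid assistant ID):
+
+            run = await client.beta.threads.create_and_run_poll(
+                assistant_id="asst_123",
+                thread={"messages": [{"role": "user", "content": "Hello"}]},
+            )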
+        """
+        run = await self.create_and_run(
+            assistant_id=assistant_id,
+            instructions=instructions,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            metadata=metadata,
+            model=model,
+            parallel_tool_calls=parallel_tool_calls,
+            response_format=response_format,
+            temperature=temperature,
+            stream=False,
+            thread=thread,
+            tool_resources=tool_resources,
+            tool_choice=tool_choice,
+            truncation_strategy=truncation_strategy,
+            top_p=top_p,
+            tools=tools,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return await self.runs.poll(
+            run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms
+        )
+
+    @overload
+    def create_and_run_stream(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
+        """Create a thread and stream the run back"""
+        ...
+
+    @overload
+    def create_and_run_stream(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        event_handler: AsyncAssistantEventHandlerT,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
+        """Create a thread and stream the run back"""
+        ...
+
+    def create_and_run_stream(
+        self,
+        *,
+        assistant_id: str,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+        tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        event_handler: AsyncAssistantEventHandlerT | None = None,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> (
+        AsyncAssistantStreamManager[AsyncAssistantEventHandler]
+        | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
+    ):
+        """Create a thread and stream the run back"""
+        extra_headers = {
+            "OpenAI-Beta": "assistants=v2",
+            "X-Stainless-Stream-Helper": "threads.create_and_run_stream",
+            "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+            **(extra_headers or {}),
+        }
+        request = self._post(
+            "/threads/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "response_format": response_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "stream": True,
+                    "thread": thread,
+                    "tools": tools,
+                    "tool_resources": tool_resources,
+                    "truncation_strategy": truncation_strategy,
+                    "top_p": top_p,
+                },
+                thread_create_and_run_params.ThreadCreateAndRunParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Run,
+            stream=True,
+            stream_cls=AsyncStream[AssistantStreamEvent],
+        )
+        return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
+
+
+class ThreadsWithRawResponse:
+    def __init__(self, threads: Threads) -> None:
+        self._threads = threads
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            threads.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            threads.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            threads.update,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            threads.delete,
+        )
+        self.create_and_run = _legacy_response.to_raw_response_wrapper(
+            threads.create_and_run,
+        )
+
+    @cached_property
+    def runs(self) -> RunsWithRawResponse:
+        return RunsWithRawResponse(self._threads.runs)
+
+    @cached_property
+    def messages(self) -> MessagesWithRawResponse:
+        return MessagesWithRawResponse(self._threads.messages)
+
+
+class AsyncThreadsWithRawResponse:
+    def __init__(self, threads: AsyncThreads) -> None:
+        self._threads = threads
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            threads.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            threads.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            threads.update,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            threads.delete,
+        )
+        self.create_and_run = _legacy_response.async_to_raw_response_wrapper(
+            threads.create_and_run,
+        )
+
+    @cached_property
+    def runs(self) -> AsyncRunsWithRawResponse:
+        return AsyncRunsWithRawResponse(self._threads.runs)
+
+    @cached_property
+    def messages(self) -> AsyncMessagesWithRawResponse:
+        return AsyncMessagesWithRawResponse(self._threads.messages)
+
+
+class ThreadsWithStreamingResponse:
+    def __init__(self, threads: Threads) -> None:
+        self._threads = threads
+
+        self.create = to_streamed_response_wrapper(
+            threads.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            threads.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            threads.update,
+        )
+        self.delete = to_streamed_response_wrapper(
+            threads.delete,
+        )
+        self.create_and_run = to_streamed_response_wrapper(
+            threads.create_and_run,
+        )
+
+    @cached_property
+    def runs(self) -> RunsWithStreamingResponse:
+        return RunsWithStreamingResponse(self._threads.runs)
+
+    @cached_property
+    def messages(self) -> MessagesWithStreamingResponse:
+        return MessagesWithStreamingResponse(self._threads.messages)
+
+
+class AsyncThreadsWithStreamingResponse:
+    def __init__(self, threads: AsyncThreads) -> None:
+        self._threads = threads
+
+        self.create = async_to_streamed_response_wrapper(
+            threads.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            threads.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            threads.update,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            threads.delete,
+        )
+        self.create_and_run = async_to_streamed_response_wrapper(
+            threads.create_and_run,
+        )
+
+    @cached_property
+    def runs(self) -> AsyncRunsWithStreamingResponse:
+        return AsyncRunsWithStreamingResponse(self._threads.runs)
+
+    @cached_property
+    def messages(self) -> AsyncMessagesWithStreamingResponse:
+        return AsyncMessagesWithStreamingResponse(self._threads.messages)
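The async helpers above pair the create-and-run endpoint with either polling or streaming. A minimal usage sketch, assuming the resource is reached as `client.beta.threads` (as in the published SDK) and that `asst_123` is a placeholder assistant id:

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # API key read from OPENAI_API_KEY

    # create_and_run_poll: create the thread, start the run, poll to a terminal state.
    run = await client.beta.threads.create_and_run_poll(
        assistant_id="asst_123",  # placeholder id
        thread={"messages": [{"role": "user", "content": "Hello"}]},
        poll_interval_ms=500,
    )
    print(run.status)

    # create_and_run_stream: returns an AsyncAssistantStreamManager, usable as an
    # async context manager that drives the SSE event stream.
    async with client.beta.threads.create_and_run_stream(
        assistant_id="asst_123",
        thread={"messages": [{"role": "user", "content": "Hello again"}]},
    ) as stream:
        await stream.until_done()


asyncio.run(main())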
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/chat/__init__.py
new file mode 100644
index 00000000..52dfdcea
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/chat/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chat import (
+    Chat,
+    AsyncChat,
+    ChatWithRawResponse,
+    AsyncChatWithRawResponse,
+    ChatWithStreamingResponse,
+    AsyncChatWithStreamingResponse,
+)
+from .completions import (
+    Completions,
+    AsyncCompletions,
+    CompletionsWithRawResponse,
+    AsyncCompletionsWithRawResponse,
+    CompletionsWithStreamingResponse,
+    AsyncCompletionsWithStreamingResponse,
+)
+
+__all__ = [
+    "Completions",
+    "AsyncCompletions",
+    "CompletionsWithRawResponse",
+    "AsyncCompletionsWithRawResponse",
+    "CompletionsWithStreamingResponse",
+    "AsyncCompletionsWithStreamingResponse",
+    "Chat",
+    "AsyncChat",
+    "ChatWithRawResponse",
+    "AsyncChatWithRawResponse",
+    "ChatWithStreamingResponse",
+    "AsyncChatWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/chat/chat.py b/.venv/lib/python3.12/site-packages/openai/resources/chat/chat.py
new file mode 100644
index 00000000..14f9224b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/chat/chat.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .completions.completions import (
+    Completions,
+    AsyncCompletions,
+    CompletionsWithRawResponse,
+    AsyncCompletionsWithRawResponse,
+    CompletionsWithStreamingResponse,
+    AsyncCompletionsWithStreamingResponse,
+)
+
+__all__ = ["Chat", "AsyncChat"]
+
+
+class Chat(SyncAPIResource):
+    @cached_property
+    def completions(self) -> Completions:
+        return Completions(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ChatWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ChatWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ChatWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ChatWithStreamingResponse(self)
+
+
+class AsyncChat(AsyncAPIResource):
+    @cached_property
+    def completions(self) -> AsyncCompletions:
+        return AsyncCompletions(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncChatWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncChatWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncChatWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncChatWithStreamingResponse(self)
+
+
+class ChatWithRawResponse:
+    def __init__(self, chat: Chat) -> None:
+        self._chat = chat
+
+    @cached_property
+    def completions(self) -> CompletionsWithRawResponse:
+        return CompletionsWithRawResponse(self._chat.completions)
+
+
+class AsyncChatWithRawResponse:
+    def __init__(self, chat: AsyncChat) -> None:
+        self._chat = chat
+
+    @cached_property
+    def completions(self) -> AsyncCompletionsWithRawResponse:
+        return AsyncCompletionsWithRawResponse(self._chat.completions)
+
+
+class ChatWithStreamingResponse:
+    def __init__(self, chat: Chat) -> None:
+        self._chat = chat
+
+    @cached_property
+    def completions(self) -> CompletionsWithStreamingResponse:
+        return CompletionsWithStreamingResponse(self._chat.completions)
+
+
+class AsyncChatWithStreamingResponse:
+    def __init__(self, chat: AsyncChat) -> None:
+        self._chat = chat
+
+    @cached_property
+    def completions(self) -> AsyncCompletionsWithStreamingResponse:
+        return AsyncCompletionsWithStreamingResponse(self._chat.completions)
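chat.py itself only wires the resource tree together; the wrapper classes swap each child resource for its raw- or streamed-response variant. A rough sketch of how these are reached from a client (assumes a configured `OpenAI` client and a chat-capable model id such as `gpt-4o`):

from openai import OpenAI

client = OpenAI()  # API key read from OPENAI_API_KEY

# Parsed access: client.chat -> Chat, .completions -> Completions.
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(completion.choices[0].message.content)

# Raw-response access routes through the *WithRawResponse wrappers and
# returns the HTTP response; .parse() yields the ChatCompletion.
raw = client.chat.completions.with_raw_response.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(raw.headers.get("x-request-id"))
print(raw.parse().choices[0].message.content)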
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/__init__.py
new file mode 100644
index 00000000..12d3b3aa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .messages import (
+    Messages,
+    AsyncMessages,
+    MessagesWithRawResponse,
+    AsyncMessagesWithRawResponse,
+    MessagesWithStreamingResponse,
+    AsyncMessagesWithStreamingResponse,
+)
+from .completions import (
+    Completions,
+    AsyncCompletions,
+    CompletionsWithRawResponse,
+    AsyncCompletionsWithRawResponse,
+    CompletionsWithStreamingResponse,
+    AsyncCompletionsWithStreamingResponse,
+)
+
+__all__ = [
+    "Messages",
+    "AsyncMessages",
+    "MessagesWithRawResponse",
+    "AsyncMessagesWithRawResponse",
+    "MessagesWithStreamingResponse",
+    "AsyncMessagesWithStreamingResponse",
+    "Completions",
+    "AsyncCompletions",
+    "CompletionsWithRawResponse",
+    "AsyncCompletionsWithRawResponse",
+    "CompletionsWithStreamingResponse",
+    "AsyncCompletionsWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py
new file mode 100644
index 00000000..d28be012
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py
@@ -0,0 +1,2331 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import inspect
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, overload
+
+import httpx
+import pydantic
+
+from .... import _legacy_response
+from .messages import (
+    Messages,
+    AsyncMessages,
+    MessagesWithRawResponse,
+    AsyncMessagesWithRawResponse,
+    MessagesWithStreamingResponse,
+    AsyncMessagesWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._streaming import Stream, AsyncStream
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ....types.chat import (
+    ChatCompletionAudioParam,
+    completion_list_params,
+    completion_create_params,
+    completion_update_params,
+)
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.shared.chat_model import ChatModel
+from ....types.chat.chat_completion import ChatCompletion
+from ....types.shared_params.metadata import Metadata
+from ....types.shared.reasoning_effort import ReasoningEffort
+from ....types.chat.chat_completion_chunk import ChatCompletionChunk
+from ....types.chat.chat_completion_deleted import ChatCompletionDeleted
+from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
+from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
+from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+
+__all__ = ["Completions", "AsyncCompletions"]
+
+
+class Completions(SyncAPIResource):
+    @cached_property
+    def messages(self) -> Messages:
+        return Messages(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> CompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return CompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return CompletionsWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion:
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
+
+        Creates a model response for the given chat conversation. Learn more in the
+        [text generation](https://platform.openai.com/docs/guides/text-generation),
+        [vision](https://platform.openai.com/docs/guides/vision), and
+        [audio](https://platform.openai.com/docs/guides/audio) guides.
+
+        Parameter support can differ depending on the model used to generate the
+        response, particularly for newer reasoning models. Parameters that are only
+        supported for reasoning models are noted below. For the current state of
+        unsupported parameters in reasoning models,
+        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+
+        Args:
+          messages: A list of messages comprising the conversation so far. Depending on the
+              [model](https://platform.openai.com/docs/models) you use, different message
+              types (modalities) are supported, like
+              [text](https://platform.openai.com/docs/guides/text-generation),
+              [images](https://platform.openai.com/docs/guides/vision), and
+              [audio](https://platform.openai.com/docs/guides/audio).
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          audio: Parameters for audio output. Required when audio output is requested with
+              `modalities: ["audio"]`.
+              [Learn more](https://platform.openai.com/docs/guides/audio).
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          function_call: Deprecated in favor of `tool_choice`.
+
+              Controls which (if any) function is called by the model.
+
+              `none` means the model will not call a function and instead generates a message.
+
+              `auto` means the model can pick between generating a message or calling a
+              function.
+
+              Specifying a particular function via `{"name": "my_function"}` forces the model
+              to call that function.
+
+              `none` is the default when no functions are present. `auto` is the default if
+              functions are present.
+
+          functions: Deprecated in favor of `tools`.
+
+              A list of functions the model may generate JSON inputs for.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
+              completion. This value can be used to control
+              [costs](https://openai.com/api/pricing/) for text generated via API.
+
+              This value is now deprecated in favor of `max_completion_tokens`, and is not
+              compatible with
+              [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
+
+              `["text"]`
+
+              The `gpt-4o-audio-preview` model can also be used to
+              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+              this model generate both text and audio responses, you can use:
+
+              `["text", "audio"]`
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          prediction: Static predicted output content, such as the content of a text file that is
+              being regenerated.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
+
+          seed: This feature is in Beta. If specified, our system will make a best effort to
+              sample deterministically, such that repeated requests with the same `seed` and
+              parameters should return the same result. Determinism is not guaranteed, and you
+              should refer to the `system_fingerprint` response parameter to monitor changes
+              in the backend.
+
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', and the Project is Scale tier enabled, the system will
+                utilize scale tier credits until they are exhausted.
+              - If set to 'auto', and the Project is not Scale tier enabled, the request will
+                be processed using the default service tier with a lower uptime SLA and no
+                latency guarantee.
+              - If set to 'default', the request will be processed using the default service
+                tier with a lower uptime SLA and no latency guarantee.
+              - When not set, the default behavior is 'auto'.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          store: Whether or not to store the output of this chat completion request for use in
+              our [model distillation](https://platform.openai.com/docs/guides/distillation)
+              or [evals](https://platform.openai.com/docs/guides/evals) products.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide for more information on how to handle the streaming events.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool. Use this to provide a list of functions the model may generate JSON inputs
+              for. A max of 128 functions are supported.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
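An illustrative non-streaming call matching the overload above (a sketch, not part of the generated file; the tool definition and model id are placeholders):

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # placeholder function name
                "description": "Get the current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    tool_choice="auto",
)
print(completion.choices[0].message.tool_calls)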
+
+    @overload
+    def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        stream: Literal[True],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[ChatCompletionChunk]:
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
+
+        Creates a model response for the given chat conversation. Learn more in the
+        [text generation](https://platform.openai.com/docs/guides/text-generation),
+        [vision](https://platform.openai.com/docs/guides/vision), and
+        [audio](https://platform.openai.com/docs/guides/audio) guides.
+
+        Parameter support can differ depending on the model used to generate the
+        response, particularly for newer reasoning models. Parameters that are only
+        supported for reasoning models are noted below. For the current state of
+        unsupported parameters in reasoning models,
+        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+
+        Args:
+          messages: A list of messages comprising the conversation so far. Depending on the
+              [model](https://platform.openai.com/docs/models) you use, different message
+              types (modalities) are supported, like
+              [text](https://platform.openai.com/docs/guides/text-generation),
+              [images](https://platform.openai.com/docs/guides/vision), and
+              [audio](https://platform.openai.com/docs/guides/audio).
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide for more information on how to handle the streaming events.
+
+          audio: Parameters for audio output. Required when audio output is requested with
+              `modalities: ["audio"]`.
+              [Learn more](https://platform.openai.com/docs/guides/audio).
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          function_call: Deprecated in favor of `tool_choice`.
+
+              Controls which (if any) function is called by the model.
+
+              `none` means the model will not call a function and instead generates a message.
+
+              `auto` means the model can pick between generating a message or calling a
+              function.
+
+              Specifying a particular function via `{"name": "my_function"}` forces the model
+              to call that function.
+
+              `none` is the default when no functions are present. `auto` is the default if
+              functions are present.
+
+          functions: Deprecated in favor of `tools`.
+
+              A list of functions the model may generate JSON inputs for.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
+              completion. This value can be used to control
+              [costs](https://openai.com/api/pricing/) for text generated via API.
+
+              This value is now deprecated in favor of `max_completion_tokens`, and is not
+              compatible with
+              [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
+
+              `["text"]`
+
+              The `gpt-4o-audio-preview` model can also be used to
+              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+              this model generate both text and audio responses, you can use:
+
+              `["text", "audio"]`
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          prediction: Static predicted output content, such as the content of a text file that is
+              being regenerated.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
+
+          seed: This feature is in Beta. If specified, our system will make a best effort to
+              sample deterministically, such that repeated requests with the same `seed` and
+              parameters should return the same result. Determinism is not guaranteed, and you
+              should refer to the `system_fingerprint` response parameter to monitor changes
+              in the backend.
+
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', and the Project is Scale tier enabled, the system will
+                utilize scale tier credits until they are exhausted.
+              - If set to 'auto', and the Project is not Scale tier enabled, the request will
+                be processed using the default service tier with a lower uptime SLA and no
+                latency guarantee.
+              - If set to 'default', the request will be processed using the default service
+                tier with a lower uptime SLA and no latency guarantee.
+              - When not set, the default behavior is 'auto'.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          store: Whether or not to store the output of this chat completion request for use in
+              our [model distillation](https://platform.openai.com/docs/guides/distillation)
+              or [evals](https://platform.openai.com/docs/guides/evals) products.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool. Use this to provide a list of functions the model may generate JSON inputs
+              for. A max of 128 functions are supported.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
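And an illustrative streaming call matching the `stream: Literal[True]` overload above (sketch only; `include_usage` is optional and, when set, the final chunk carries usage with an empty `choices` list):

from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Tell me a one-line joke"}],
    stream=True,
    stream_options={"include_usage": True},
)
for chunk in stream:
    # The usage-only final chunk has no choices, so guard before indexing.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
print()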
+
+    @overload
+    def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        stream: bool,
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
+
+        Creates a model response for the given chat conversation. Learn more in the
+        [text generation](https://platform.openai.com/docs/guides/text-generation),
+        [vision](https://platform.openai.com/docs/guides/vision), and
+        [audio](https://platform.openai.com/docs/guides/audio) guides.
+
+        Parameter support can differ depending on the model used to generate the
+        response, particularly for newer reasoning models. Parameters that are only
+        supported for reasoning models are noted below. For the current state of
+        unsupported parameters in reasoning models,
+        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+
+        Args:
+          messages: A list of messages comprising the conversation so far. Depending on the
+              [model](https://platform.openai.com/docs/models) you use, different message
+              types (modalities) are supported, like
+              [text](https://platform.openai.com/docs/guides/text-generation),
+              [images](https://platform.openai.com/docs/guides/vision), and
+              [audio](https://platform.openai.com/docs/guides/audio).
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide for more information on how to handle the streaming events.
+
+          audio: Parameters for audio output. Required when audio output is requested with
+              `modalities: ["audio"]`.
+              [Learn more](https://platform.openai.com/docs/guides/audio).
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          function_call: Deprecated in favor of `tool_choice`.
+
+              Controls which (if any) function is called by the model.
+
+              `none` means the model will not call a function and instead generates a message.
+
+              `auto` means the model can pick between generating a message or calling a
+              function.
+
+              Specifying a particular function via `{"name": "my_function"}` forces the model
+              to call that function.
+
+              `none` is the default when no functions are present. `auto` is the default if
+              functions are present.
+
+          functions: Deprecated in favor of `tools`.
+
+              A list of functions the model may generate JSON inputs for.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
+              completion. This value can be used to control
+              [costs](https://openai.com/api/pricing/) for text generated via API.
+
+              This value is now deprecated in favor of `max_completion_tokens`, and is not
+              compatible with
+              [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
+
+              `["text"]`
+
+              The `gpt-4o-audio-preview` model can also be used to
+              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+              this model generate both text and audio responses, you can use:
+
+              `["text", "audio"]`
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          prediction: Static predicted output content, such as the content of a text file that is
+              being regenerated.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
+
+          seed: This feature is in Beta. If specified, our system will make a best effort to
+              sample deterministically, such that repeated requests with the same `seed` and
+              parameters should return the same result. Determinism is not guaranteed, and you
+              should refer to the `system_fingerprint` response parameter to monitor changes
+              in the backend.
+
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', and the Project is Scale tier enabled, the system will
+                utilize scale tier credits until they are exhausted.
+              - If set to 'auto', and the Project is not Scale tier enabled, the request will
+                be processed using the default service tier with a lower uptime SLA and no
+                latency guarantee.
+              - If set to 'default', the request will be processed using the default service
+                tier with a lower uptime SLA and no latency guarantee.
+              - When not set, the default behavior is 'auto'.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          store: Whether or not to store the output of this chat completion request for use in
+              our [model distillation](https://platform.openai.com/docs/guides/distillation)
+              or [evals](https://platform.openai.com/docs/guides/evals) products.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool. Use this to provide a list of functions the model may generate JSON inputs
+              for. A maximum of 128 functions is supported.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["messages", "model"], ["messages", "model", "stream"])
+    def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
+        validate_response_format(response_format)
+        return self._post(
+            "/chat/completions",
+            body=maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "audio": audio,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "modalities": modalities,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "prediction": prediction,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "seed": seed,
+                    "service_tier": service_tier,
+                    "stop": stop,
+                    "store": store,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                    "web_search_options": web_search_options,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletion,
+            stream=stream or False,
+            stream_cls=Stream[ChatCompletionChunk],
+        )
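+
+    # A minimal usage sketch for `create`, assuming `client = OpenAI()` has been
+    # constructed with an API key available in the environment:
+    #
+    #     completion = client.chat.completions.create(
+    #         model="gpt-4o",
+    #         messages=[{"role": "user", "content": "Say hello"}],
+    #     )
+    #     print(completion.choices[0].message.content)
+    #
+    #     # With `stream=True` the same call returns a Stream[ChatCompletionChunk]:
+    #     for chunk in client.chat.completions.create(
+    #         model="gpt-4o",
+    #         messages=[{"role": "user", "content": "Say hello"}],
+    #         stream=True,
+    #     ):
+    #         print(chunk.choices[0].delta.content or "", end="")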
+
+    def retrieve(
+        self,
+        completion_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion:
+        """Get a stored chat completion.
+
+        Only Chat Completions that have been created with
+        the `store` parameter set to `true` will be returned.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return self._get(
+            f"/chat/completions/{completion_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletion,
+        )
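+
+    # Illustrative sketch: fetch a stored completion by ID (placeholder ID shown;
+    # only completions created with `store=True` can be retrieved):
+    #
+    #     stored = client.chat.completions.retrieve("chatcmpl-abc123")
+    #     print(stored.choices[0].message.content)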
+
+    def update(
+        self,
+        completion_id: str,
+        *,
+        metadata: Optional[Metadata],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion:
+        """Modify a stored chat completion.
+
+        Only Chat Completions that have been created
+        with the `store` parameter set to `true` can be modified. Currently, the only
+        supported modification is to update the `metadata` field.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return self._post(
+            f"/chat/completions/{completion_id}",
+            body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletion,
+        )
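+
+    # Illustrative sketch: update the `metadata` of a stored completion
+    # (placeholder ID; the completion must have been created with `store=True`):
+    #
+    #     updated = client.chat.completions.update(
+    #         "chatcmpl-abc123",
+    #         metadata={"topic": "greeting"},
+    #     )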
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: str | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[ChatCompletion]:
+        """List stored Chat Completions.
+
+        Only Chat Completions that have been stored with
+        the `store` parameter set to `true` will be returned.
+
+        Args:
+          after: Identifier for the last chat completion from the previous pagination request.
+
+          limit: Number of Chat Completions to retrieve.
+
+          metadata: A list of metadata keys to filter the Chat Completions by. Example:
+
+              `metadata[key1]=value1&metadata[key2]=value2`
+
+          model: The model used to generate the Chat Completions.
+
+          order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
+              `desc` for descending order. Defaults to `asc`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/chat/completions",
+            page=SyncCursorPage[ChatCompletion],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "metadata": metadata,
+                        "model": model,
+                        "order": order,
+                    },
+                    completion_list_params.CompletionListParams,
+                ),
+            ),
+            model=ChatCompletion,
+        )
+
+    def delete(
+        self,
+        completion_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletionDeleted:
+        """Delete a stored chat completion.
+
+        Only Chat Completions that have been created
+        with the `store` parameter set to `true` can be deleted.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return self._delete(
+            f"/chat/completions/{completion_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletionDeleted,
+        )
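+
+    # Illustrative sketch: delete a stored completion (placeholder ID):
+    #
+    #     deleted = client.chat.completions.delete("chatcmpl-abc123")
+    #     assert deleted.deleted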
+
+
+class AsyncCompletions(AsyncAPIResource):
+    @cached_property
+    def messages(self) -> AsyncMessages:
+        return AsyncMessages(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncCompletionsWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion:
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
+
+        Creates a model response for the given chat conversation. Learn more in the
+        [text generation](https://platform.openai.com/docs/guides/text-generation),
+        [vision](https://platform.openai.com/docs/guides/vision), and
+        [audio](https://platform.openai.com/docs/guides/audio) guides.
+
+        Parameter support can differ depending on the model used to generate the
+        response, particularly for newer reasoning models. Parameters that are only
+        supported for reasoning models are noted below. For the current state of
+        unsupported parameters in reasoning models,
+        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+
+        Args:
+          messages: A list of messages comprising the conversation so far. Depending on the
+              [model](https://platform.openai.com/docs/models) you use, different message
+              types (modalities) are supported, like
+              [text](https://platform.openai.com/docs/guides/text-generation),
+              [images](https://platform.openai.com/docs/guides/vision), and
+              [audio](https://platform.openai.com/docs/guides/audio).
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          audio: Parameters for audio output. Required when audio output is requested with
+              `modalities: ["audio"]`.
+              [Learn more](https://platform.openai.com/docs/guides/audio).
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          function_call: Deprecated in favor of `tool_choice`.
+
+              Controls which (if any) function is called by the model.
+
+              `none` means the model will not call a function and instead generates a message.
+
+              `auto` means the model can pick between generating a message or calling a
+              function.
+
+              Specifying a particular function via `{"name": "my_function"}` forces the model
+              to call that function.
+
+              `none` is the default when no functions are present. `auto` is the default if
+              functions are present.
+
+          functions: Deprecated in favor of `tools`.
+
+              A list of functions the model may generate JSON inputs for.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
+              completion. This value can be used to control
+              [costs](https://openai.com/api/pricing/) for text generated via API.
+
+              This value is now deprecated in favor of `max_completion_tokens`, and is not
+              compatible with
+              [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
+
+              `["text"]`
+
+              The `gpt-4o-audio-preview` model can also be used to
+              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+              this model generate both text and audio responses, you can use:
+
+              `["text", "audio"]`
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          prediction: Static predicted output content, such as the content of a text file that is
+              being regenerated.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
+
+          seed: This feature is in Beta. If specified, our system will make a best effort to
+              sample deterministically, such that repeated requests with the same `seed` and
+              parameters should return the same result. Determinism is not guaranteed, and you
+              should refer to the `system_fingerprint` response parameter to monitor changes
+              in the backend.
+
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', and the Project is Scale tier enabled, the system will
+                utilize scale tier credits until they are exhausted.
+              - If set to 'auto', and the Project is not Scale tier enabled, the request will
+                be processed using the default service tier with a lower uptime SLA and no
+                latency guarantee.
+              - If set to 'default', the request will be processed using the default service
+                tier with a lower uptime SLA and no latency guarantee.
+              - When not set, the default behavior is 'auto'.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          store: Whether or not to store the output of this chat completion request for use in
+              our [model distillation](https://platform.openai.com/docs/guides/distillation)
+              or [evals](https://platform.openai.com/docs/guides/evals) products.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide on how to handle the streaming events.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool. Use this to provide a list of functions the model may generate JSON inputs
+              for. A maximum of 128 functions is supported.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
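+
+    # Async usage sketch for the non-streaming overload above, assuming
+    # `client = AsyncOpenAI()` and an enclosing `async def`:
+    #
+    #     completion = await client.chat.completions.create(
+    #         model="gpt-4o",
+    #         messages=[{"role": "user", "content": "Say hello"}],
+    #     )
+    #     print(completion.choices[0].message.content)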
+
+    @overload
+    async def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        stream: Literal[True],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[ChatCompletionChunk]:
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
+
+        Creates a model response for the given chat conversation. Learn more in the
+        [text generation](https://platform.openai.com/docs/guides/text-generation),
+        [vision](https://platform.openai.com/docs/guides/vision), and
+        [audio](https://platform.openai.com/docs/guides/audio) guides.
+
+        Parameter support can differ depending on the model used to generate the
+        response, particularly for newer reasoning models. Parameters that are only
+        supported for reasoning models are noted below. For the current state of
+        unsupported parameters in reasoning models,
+        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+
+        Args:
+          messages: A list of messages comprising the conversation so far. Depending on the
+              [model](https://platform.openai.com/docs/models) you use, different message
+              types (modalities) are supported, like
+              [text](https://platform.openai.com/docs/guides/text-generation),
+              [images](https://platform.openai.com/docs/guides/vision), and
+              [audio](https://platform.openai.com/docs/guides/audio).
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide on how to handle the streaming events.
+
+          audio: Parameters for audio output. Required when audio output is requested with
+              `modalities: ["audio"]`.
+              [Learn more](https://platform.openai.com/docs/guides/audio).
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          function_call: Deprecated in favor of `tool_choice`.
+
+              Controls which (if any) function is called by the model.
+
+              `none` means the model will not call a function and instead generates a message.
+
+              `auto` means the model can pick between generating a message or calling a
+              function.
+
+              Specifying a particular function via `{"name": "my_function"}` forces the model
+              to call that function.
+
+              `none` is the default when no functions are present. `auto` is the default if
+              functions are present.
+
+          functions: Deprecated in favor of `tools`.
+
+              A list of functions the model may generate JSON inputs for.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
+              completion. This value can be used to control
+              [costs](https://openai.com/api/pricing/) for text generated via API.
+
+              This value is now deprecated in favor of `max_completion_tokens`, and is not
+              compatible with
+              [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
+
+              `["text"]`
+
+              The `gpt-4o-audio-preview` model can also be used to
+              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+              this model generate both text and audio responses, you can use:
+
+              `["text", "audio"]`
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          prediction: Static predicted output content, such as the content of a text file that is
+              being regenerated.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
+
+          seed: This feature is in Beta. If specified, our system will make a best effort to
+              sample deterministically, such that repeated requests with the same `seed` and
+              parameters should return the same result. Determinism is not guaranteed, and you
+              should refer to the `system_fingerprint` response parameter to monitor changes
+              in the backend.
+
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', and the Project is Scale tier enabled, the system will
+                utilize scale tier credits until they are exhausted.
+              - If set to 'auto', and the Project is not Scale tier enabled, the request will
+                be processed using the default service tier with a lower uptime SLA and no
+                latency guarantee.
+              - If set to 'default', the request will be processed using the default service
+                tier with a lower uptime SLA and no latency guarantee.
+              - When not set, the default behavior is 'auto'.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          store: Whether or not to store the output of this chat completion request for use in
+              our [model distillation](https://platform.openai.com/docs/guides/distillation)
+              or [evals](https://platform.openai.com/docs/guides/evals) products.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool. Use this to provide a list of functions the model may generate JSON inputs
+              for. A maximum of 128 functions is supported.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
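+
+    # Streaming sketch for the overload above: with `stream=True` the awaited call
+    # resolves to an AsyncStream[ChatCompletionChunk] consumed with `async for`
+    # (same hypothetical `client = AsyncOpenAI()` as above):
+    #
+    #     stream = await client.chat.completions.create(
+    #         model="gpt-4o",
+    #         messages=[{"role": "user", "content": "Say hello"}],
+    #         stream=True,
+    #     )
+    #     async for chunk in stream:
+    #         print(chunk.choices[0].delta.content or "", end="")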
+
+    @overload
+    async def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        stream: bool,
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
+
+        Creates a model response for the given chat conversation. Learn more in the
+        [text generation](https://platform.openai.com/docs/guides/text-generation),
+        [vision](https://platform.openai.com/docs/guides/vision), and
+        [audio](https://platform.openai.com/docs/guides/audio) guides.
+
+        Parameter support can differ depending on the model used to generate the
+        response, particularly for newer reasoning models. Parameters that are only
+        supported for reasoning models are noted below. For the current state of
+        unsupported parameters in reasoning models,
+        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+
+        Args:
+          messages: A list of messages comprising the conversation so far. Depending on the
+              [model](https://platform.openai.com/docs/models) you use, different message
+              types (modalities) are supported, like
+              [text](https://platform.openai.com/docs/guides/text-generation),
+              [images](https://platform.openai.com/docs/guides/vision), and
+              [audio](https://platform.openai.com/docs/guides/audio).
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide for details on how to handle the streaming events.
+
+          audio: Parameters for audio output. Required when audio output is requested with
+              `modalities: ["audio"]`.
+              [Learn more](https://platform.openai.com/docs/guides/audio).
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          function_call: Deprecated in favor of `tool_choice`.
+
+              Controls which (if any) function is called by the model.
+
+              `none` means the model will not call a function and instead generates a message.
+
+              `auto` means the model can pick between generating a message or calling a
+              function.
+
+              Specifying a particular function via `{"name": "my_function"}` forces the model
+              to call that function.
+
+              `none` is the default when no functions are present. `auto` is the default if
+              functions are present.
+
+          functions: Deprecated in favor of `tools`.
+
+              A list of functions the model may generate JSON inputs for.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
+              completion. This value can be used to control
+              [costs](https://openai.com/api/pricing/) for text generated via API.
+
+              This value is now deprecated in favor of `max_completion_tokens`, and is not
+              compatible with
+              [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
+
+              `["text"]`
+
+              The `gpt-4o-audio-preview` model can also be used to
+              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+              this model generate both text and audio responses, you can use:
+
+              `["text", "audio"]`
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          prediction: Static predicted output content, such as the content of a text file that is
+              being regenerated.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: **o-series models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
+
+          seed: This feature is in Beta. If specified, our system will make a best effort to
+              sample deterministically, such that repeated requests with the same `seed` and
+              parameters should return the same result. Determinism is not guaranteed, and you
+              should refer to the `system_fingerprint` response parameter to monitor changes
+              in the backend.
+
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', and the Project is Scale tier enabled, the system will
+                utilize scale tier credits until they are exhausted.
+              - If set to 'auto', and the Project is not Scale tier enabled, the request will
+                be processed using the default service tier with a lower uptime SLA and no
+                latency guarantee.
+              - If set to 'default', the request will be processed using the default service
+                tier with a lower uptime SLA and no latency guarantee.
+              - When not set, the default behavior is 'auto'.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          store: Whether or not to store the output of this chat completion request for use in
+              our [model distillation](https://platform.openai.com/docs/guides/distillation)
+              or [evals](https://platform.openai.com/docs/guides/evals) products.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool. Use this to provide a list of functions the model may generate JSON inputs
+              for. A max of 128 functions are supported.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["messages", "model"], ["messages", "model", "stream"])
+    async def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
+        validate_response_format(response_format)
+        return await self._post(
+            "/chat/completions",
+            body=await async_maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "audio": audio,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "modalities": modalities,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "prediction": prediction,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "seed": seed,
+                    "service_tier": service_tier,
+                    "stop": stop,
+                    "store": store,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                    "web_search_options": web_search_options,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletion,
+            stream=stream or False,
+            stream_cls=AsyncStream[ChatCompletionChunk],
+        )
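
A minimal usage sketch of the async `create` method above, assuming an API key is configured via the `OPENAI_API_KEY` environment variable; the model name `gpt-4o` and prompt are illustrative only:

    import asyncio
    from openai import AsyncOpenAI

    async def main() -> None:
        client = AsyncOpenAI()
        # Non-streaming: resolves to a ChatCompletion.
        completion = await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Say hello"}],
        )
        print(completion.choices[0].message.content)

        # stream=True: resolves to an AsyncStream[ChatCompletionChunk].
        stream = await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Say hello"}],
            stream=True,
        )
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="")

    asyncio.run(main())
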
+
+    async def retrieve(
+        self,
+        completion_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion:
+        """Get a stored chat completion.
+
+        Only Chat Completions that have been created with
+        the `store` parameter set to `true` will be returned.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return await self._get(
+            f"/chat/completions/{completion_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletion,
+        )
+
+    async def update(
+        self,
+        completion_id: str,
+        *,
+        metadata: Optional[Metadata],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletion:
+        """Modify a stored chat completion.
+
+        Only Chat Completions that have been created
+        with the `store` parameter set to `true` can be modified. Currently, the only
+        supported modification is to update the `metadata` field.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return await self._post(
+            f"/chat/completions/{completion_id}",
+            body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletion,
+        )
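
A sketch of the stored-completion workflow that the `retrieve` and `update` methods above support; the completion must be created with `store=True`, and the metadata keys shown here are illustrative:

    from openai import AsyncOpenAI

    async def tag_completion(client: AsyncOpenAI) -> None:
        completion = await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Say hello"}],
            store=True,  # required for retrieve/update/list/delete
        )
        # Attach metadata to the stored completion, then read it back by ID.
        await client.chat.completions.update(completion.id, metadata={"run": "demo"})
        stored = await client.chat.completions.retrieve(completion.id)
        print(stored.id)
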
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: str | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]:
+        """List stored Chat Completions.
+
+        Only Chat Completions that have been stored with
+        the `store` parameter set to `true` will be returned.
+
+        Args:
+          after: Identifier for the last chat completion from the previous pagination request.
+
+          limit: Number of Chat Completions to retrieve.
+
+          metadata:
+              A set of metadata key-value pairs to filter the Chat Completions by. Example:
+
+              `metadata[key1]=value1&metadata[key2]=value2`
+
+          model: The model used to generate the Chat Completions.
+
+          order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
+              `desc` for descending order. Defaults to `asc`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/chat/completions",
+            page=AsyncCursorPage[ChatCompletion],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "metadata": metadata,
+                        "model": model,
+                        "order": order,
+                    },
+                    completion_list_params.CompletionListParams,
+                ),
+            ),
+            model=ChatCompletion,
+        )
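
The paginator returned by `list` above can be iterated directly and fetches further pages on demand; a sketch assuming stored completions already exist:

    from openai import AsyncOpenAI

    async def show_recent(client: AsyncOpenAI) -> None:
        async for completion in client.chat.completions.list(limit=20, order="desc"):
            print(completion.id, completion.model)
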
+
+    async def delete(
+        self,
+        completion_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletionDeleted:
+        """Delete a stored chat completion.
+
+        Only Chat Completions that have been created
+        with the `store` parameter set to `true` can be deleted.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return await self._delete(
+            f"/chat/completions/{completion_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatCompletionDeleted,
+        )
+
+
+class CompletionsWithRawResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            completions.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            completions.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            completions.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            completions.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            completions.delete,
+        )
+
+    @cached_property
+    def messages(self) -> MessagesWithRawResponse:
+        return MessagesWithRawResponse(self._completions.messages)
+
+
+class AsyncCompletionsWithRawResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            completions.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            completions.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            completions.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            completions.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            completions.delete,
+        )
+
+    @cached_property
+    def messages(self) -> AsyncMessagesWithRawResponse:
+        return AsyncMessagesWithRawResponse(self._completions.messages)
+
+
+class CompletionsWithStreamingResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.create = to_streamed_response_wrapper(
+            completions.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            completions.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            completions.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            completions.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            completions.delete,
+        )
+
+    @cached_property
+    def messages(self) -> MessagesWithStreamingResponse:
+        return MessagesWithStreamingResponse(self._completions.messages)
+
+
+class AsyncCompletionsWithStreamingResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.create = async_to_streamed_response_wrapper(
+            completions.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            completions.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            completions.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            completions.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            completions.delete,
+        )
+
+    @cached_property
+    def messages(self) -> AsyncMessagesWithStreamingResponse:
+        return AsyncMessagesWithStreamingResponse(self._completions.messages)
+
+
+def validate_response_format(response_format: object) -> None:
+    if inspect.isclass(response_format) and issubclass(response_format, pydantic.BaseModel):
+        raise TypeError(
+            "You tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `beta.chat.completions.parse()` instead"
+        )
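
As `validate_response_format` above enforces, a Pydantic model class cannot be passed to `create` as `response_format`; a sketch of the intended alternative using the beta parsing helper referenced in the error message (model name and schema are illustrative):

    from pydantic import BaseModel
    from openai import OpenAI

    class Answer(BaseModel):
        text: str

    client = OpenAI()
    # client.chat.completions.create(..., response_format=Answer) would raise TypeError here.
    completion = client.beta.chat.completions.parse(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello"}],
        response_format=Answer,
    )
    print(completion.choices[0].message.parsed)
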
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py
new file mode 100644
index 00000000..fac15fba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/messages.py
@@ -0,0 +1,212 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.chat.completions import message_list_params
+from ....types.chat.chat_completion_store_message import ChatCompletionStoreMessage
+
+__all__ = ["Messages", "AsyncMessages"]
+
+
+class Messages(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> MessagesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return MessagesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> MessagesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return MessagesWithStreamingResponse(self)
+
+    def list(
+        self,
+        completion_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[ChatCompletionStoreMessage]:
+        """Get the messages in a stored chat completion.
+
+        Only Chat Completions that have
+        been created with the `store` parameter set to `true` will be returned.
+
+        Args:
+          after: Identifier for the last message from the previous pagination request.
+
+          limit: Number of messages to retrieve.
+
+          order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
+              for descending order. Defaults to `asc`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return self._get_api_list(
+            f"/chat/completions/{completion_id}/messages",
+            page=SyncCursorPage[ChatCompletionStoreMessage],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    message_list_params.MessageListParams,
+                ),
+            ),
+            model=ChatCompletionStoreMessage,
+        )
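
A sketch of paging through the messages of a stored Chat Completion with the `list` method above; the completion ID is a placeholder:

    from openai import OpenAI

    client = OpenAI()
    # SyncCursorPage is iterable and fetches additional pages as needed.
    for message in client.chat.completions.messages.list("chatcmpl_abc123", limit=50):
        print(message.id, message.role)
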
+
+
+class AsyncMessages(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncMessagesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncMessagesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncMessagesWithStreamingResponse(self)
+
+    def list(
+        self,
+        completion_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]:
+        """Get the messages in a stored chat completion.
+
+        Only Chat Completions that have
+        been created with the `store` parameter set to `true` will be returned.
+
+        Args:
+          after: Identifier for the last message from the previous pagination request.
+
+          limit: Number of messages to retrieve.
+
+          order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
+              for descending order. Defaults to `asc`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not completion_id:
+            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
+        return self._get_api_list(
+            f"/chat/completions/{completion_id}/messages",
+            page=AsyncCursorPage[ChatCompletionStoreMessage],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    message_list_params.MessageListParams,
+                ),
+            ),
+            model=ChatCompletionStoreMessage,
+        )
+
+
+class MessagesWithRawResponse:
+    def __init__(self, messages: Messages) -> None:
+        self._messages = messages
+
+        self.list = _legacy_response.to_raw_response_wrapper(
+            messages.list,
+        )
+
+
+class AsyncMessagesWithRawResponse:
+    def __init__(self, messages: AsyncMessages) -> None:
+        self._messages = messages
+
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            messages.list,
+        )
+
+
+class MessagesWithStreamingResponse:
+    def __init__(self, messages: Messages) -> None:
+        self._messages = messages
+
+        self.list = to_streamed_response_wrapper(
+            messages.list,
+        )
+
+
+class AsyncMessagesWithStreamingResponse:
+    def __init__(self, messages: AsyncMessages) -> None:
+        self._messages = messages
+
+        self.list = async_to_streamed_response_wrapper(
+            messages.list,
+        )
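
A sketch of the `.with_raw_response` wrapper these classes back, which exposes HTTP details before parsing; the completion ID is again a placeholder:

    from openai import OpenAI

    client = OpenAI()
    response = client.chat.completions.messages.with_raw_response.list("chatcmpl_abc123")
    print(response.headers.get("x-request-id"))
    page = response.parse()  # parse into SyncCursorPage[ChatCompletionStoreMessage]
    for message in page:
        print(message.id)
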
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/completions.py b/.venv/lib/python3.12/site-packages/openai/resources/completions.py
new file mode 100644
index 00000000..171f5093
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/completions.py
@@ -0,0 +1,1148 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, overload
+
+import httpx
+
+from .. import _legacy_response
+from ..types import completion_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import (
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .._streaming import Stream, AsyncStream
+from .._base_client import (
+    make_request_options,
+)
+from ..types.completion import Completion
+from ..types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+
+__all__ = ["Completions", "AsyncCompletions"]
+
+
+class Completions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> CompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return CompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return CompletionsWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion:
+        """
+        Creates a completion for the provided prompt and parameters.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          prompt: The prompt(s) to generate completions for, encoded as a string, array of
+              strings, array of tokens, or array of token arrays.
+
+              Note that <|endoftext|> is the document separator that the model sees during
+              training, so if a prompt is not specified the model will generate as if from the
+              beginning of a new document.
+
+          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
+              the highest log probability per token). Results cannot be streamed.
+
+              When used with `n`, `best_of` controls the number of candidate completions and
+              `n` specifies how many to return – `best_of` must be greater than `n`.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          echo: Echo back the prompt in addition to the completion.
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+              tokenizer) to an associated bias value from -100 to 100. You can use this
+              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+              Mathematically, the bias is added to the logits generated by the model prior to
+              sampling. The exact effect will vary per model, but values between -1 and 1
+              should decrease or increase likelihood of selection; values like -100 or 100
+              should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+              from being generated.
+
+          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
+              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+              list of the 5 most likely tokens. The API will always return the `logprob` of
+              the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
+              completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens.
+
+          n: How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
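
A usage sketch of the non-streaming overload above, assuming `OPENAI_API_KEY` is set; the prompt is illustrative:

    from openai import OpenAI

    client = OpenAI()
    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say hello",
        max_tokens=16,
    )
    print(completion.choices[0].text)
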
+
+    @overload
+    def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        stream: Literal[True],
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[Completion]:
+        """
+        Creates a completion for the provided prompt and parameters.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          prompt: The prompt(s) to generate completions for, encoded as a string, array of
+              strings, array of tokens, or array of token arrays.
+
+              Note that <|endoftext|> is the document separator that the model sees during
+              training, so if a prompt is not specified the model will generate as if from the
+              beginning of a new document.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
+              the highest log probability per token). Results cannot be streamed.
+
+              When used with `n`, `best_of` controls the number of candidate completions and
+              `n` specifies how many to return – `best_of` must be greater than `n`.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          echo: Echo back the prompt in addition to the completion.
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+              tokenizer) to an associated bias value from -100 to 100. You can use this
+              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+              Mathematically, the bias is added to the logits generated by the model prior to
+              sampling. The exact effect will vary per model, but values between -1 and 1
+              should decrease or increase likelihood of selection; values like -100 or 100
+              should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+              from being generated.
+
+          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
+              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+              list of the 5 most likely tokens. The API will always return the `logprob` of
+              the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
+              completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens.
+
+          n: How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
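
And a sketch of the streaming overload above, where `create` returns a `Stream[Completion]` of incremental chunks:

    from openai import OpenAI

    client = OpenAI()
    stream = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Count to five:",
        stream=True,
    )
    for chunk in stream:
        if chunk.choices:
            print(chunk.choices[0].text, end="")
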
+
+    @overload
+    def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        stream: bool,
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | Stream[Completion]:
+        """
+        Creates a completion for the provided prompt and parameters.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          prompt: The prompt(s) to generate completions for, encoded as a string, array of
+              strings, array of tokens, or array of token arrays.
+
+              Note that <|endoftext|> is the document separator that the model sees during
+              training, so if a prompt is not specified the model will generate as if from the
+              beginning of a new document.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
+              the highest log probability per token). Results cannot be streamed.
+
+              When used with `n`, `best_of` controls the number of candidate completions and
+              `n` specifies how many to return – `best_of` must be greater than `n`.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+              tokenizer) to an associated bias value from -100 to 100. You can use this
+              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+              Mathematically, the bias is added to the logits generated by the model prior to
+              sampling. The exact effect will vary per model, but values between -1 and 1
+              should decrease or increase likelihood of selection; values like -100 or 100
+              should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+              from being generated.
+
+          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
+              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+              list of the 5 most likely tokens. The API will always return the `logprob` of
+              the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
+              completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens.
+
+          n: How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
+    def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | Stream[Completion]:
+        return self._post(
+            "/completions",
+            body=maybe_transform(
+                {
+                    "model": model,
+                    "prompt": prompt,
+                    "best_of": best_of,
+                    "echo": echo,
+                    "frequency_penalty": frequency_penalty,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_tokens": max_tokens,
+                    "n": n,
+                    "presence_penalty": presence_penalty,
+                    "seed": seed,
+                    "stop": stop,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "suffix": suffix,
+                    "temperature": temperature,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Completion,
+            stream=stream or False,
+            stream_cls=Stream[Completion],
+        )
+
+
+class AsyncCompletions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncCompletionsWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion:
+        """
+        Creates a completion for the provided prompt and parameters.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          prompt: The prompt(s) to generate completions for, encoded as a string, array of
+              strings, array of tokens, or array of token arrays.
+
+              Note that <|endoftext|> is the document separator that the model sees during
+              training, so if a prompt is not specified the model will generate as if from the
+              beginning of a new document.
+
+          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
+              the highest log probability per token). Results cannot be streamed.
+
+              When used with `n`, `best_of` controls the number of candidate completions and
+              `n` specifies how many to return – `best_of` must be greater than `n`.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+              tokenizer) to an associated bias value from -100 to 100. You can use this
+              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+              Mathematically, the bias is added to the logits generated by the model prior to
+              sampling. The exact effect will vary per model, but values between -1 and 1
+              should decrease or increase likelihood of selection; values like -100 or 100
+              should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+              from being generated.
+
+          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
+              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+              list of the 5 most likely tokens. The API will always return the `logprob` of
+              the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
+              completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens.
+
+          n: How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        stream: Literal[True],
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[Completion]:
+        """
+        Creates a completion for the provided prompt and parameters.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          prompt: The prompt(s) to generate completions for, encoded as a string, array of
+              strings, array of tokens, or array of token arrays.
+
+              Note that <|endoftext|> is the document separator that the model sees during
+              training, so if a prompt is not specified the model will generate as if from the
+              beginning of a new document.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
+              the highest log probability per token). Results cannot be streamed.
+
+              When used with `n`, `best_of` controls the number of candidate completions and
+              `n` specifies how many to return – `best_of` must be greater than `n`.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+              tokenizer) to an associated bias value from -100 to 100. You can use this
+              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+              Mathematically, the bias is added to the logits generated by the model prior to
+              sampling. The exact effect will vary per model, but values between -1 and 1
+              should decrease or increase likelihood of selection; values like -100 or 100
+              should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+              from being generated.
+
+          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
+              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+              list of the 5 most likely tokens. The API will always return the `logprob` of
+              the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
+              completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens.
+
+          n: How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        stream: bool,
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | AsyncStream[Completion]:
+        """
+        Creates a completion for the provided prompt and parameters.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          prompt: The prompt(s) to generate completions for, encoded as a string, array of
+              strings, array of tokens, or array of token arrays.
+
+              Note that <|endoftext|> is the document separator that the model sees during
+              training, so if a prompt is not specified the model will generate as if from the
+              beginning of a new document.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
+              the highest log probability per token). Results cannot be streamed.
+
+              When used with `n`, `best_of` controls the number of candidate completions and
+              `n` specifies how many to return – `best_of` must be greater than `n`.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+              tokenizer) to an associated bias value from -100 to 100. You can use this
+              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+              Mathematically, the bias is added to the logits generated by the model prior to
+              sampling. The exact effect will vary per model, but values between -1 and 1
+              should decrease or increase likelihood of selection; values like -100 or 100
+              should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+              from being generated.
+
+          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
+              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+              list of the 5 most likely tokens. The API will always return the `logprob` of
+              the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
+              completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens.
+
+          n: How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
+    async def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | AsyncStream[Completion]:
+        return await self._post(
+            "/completions",
+            body=await async_maybe_transform(
+                {
+                    "model": model,
+                    "prompt": prompt,
+                    "best_of": best_of,
+                    "echo": echo,
+                    "frequency_penalty": frequency_penalty,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_tokens": max_tokens,
+                    "n": n,
+                    "presence_penalty": presence_penalty,
+                    "seed": seed,
+                    "stop": stop,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "suffix": suffix,
+                    "temperature": temperature,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Completion,
+            stream=stream or False,
+            stream_cls=AsyncStream[Completion],
+        )
+
+
+class CompletionsWithRawResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            completions.create,
+        )
+
+
+class AsyncCompletionsWithRawResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            completions.create,
+        )
+
+
+class CompletionsWithStreamingResponse:
+    def __init__(self, completions: Completions) -> None:
+        self._completions = completions
+
+        self.create = to_streamed_response_wrapper(
+            completions.create,
+        )
+
+
+class AsyncCompletionsWithStreamingResponse:
+    def __init__(self, completions: AsyncCompletions) -> None:
+        self._completions = completions
+
+        self.create = async_to_streamed_response_wrapper(
+            completions.create,
+        )
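Whether `create` returns a plain `Completion` or a `Stream[Completion]` is decided at runtime by the `stream` argument; the overloads above only narrow the static return type, and the `required_args(["model", "prompt"], ["model", "prompt", "stream"])` decorator enforces the two documented call shapes. A minimal usage sketch, assuming an `OPENAI_API_KEY` in the environment; the prompt text and parameter values are placeholders only:

    from openai import OpenAI

    client = OpenAI()  # picks up OPENAI_API_KEY from the environment

    # Non-streaming call: the first overload applies and a single Completion is returned.
    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test",
        max_tokens=16,
        temperature=0,
    )
    print(completion.choices[0].text)

    # Streaming call: stream=True selects the Stream[Completion] overload; partial
    # completions arrive as server-sent events and are yielded as Completion chunks.
    for chunk in client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test",
        stream=True,
    ):
        print(chunk.choices[0].text, end="")

The `AsyncCompletions` resource above mirrors this behaviour, yielding an `AsyncStream[Completion]` that is consumed with `async for`.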
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/embeddings.py b/.venv/lib/python3.12/site-packages/openai/resources/embeddings.py
new file mode 100644
index 00000000..a392d5eb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/embeddings.py
@@ -0,0 +1,290 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import array
+import base64
+from typing import List, Union, Iterable, cast
+from typing_extensions import Literal
+
+import httpx
+
+from .. import _legacy_response
+from ..types import embedding_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import is_given, maybe_transform
+from .._compat import cached_property
+from .._extras import numpy as np, has_numpy
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .._base_client import make_request_options
+from ..types.embedding_model import EmbeddingModel
+from ..types.create_embedding_response import CreateEmbeddingResponse
+
+__all__ = ["Embeddings", "AsyncEmbeddings"]
+
+
+class Embeddings(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> EmbeddingsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return EmbeddingsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> EmbeddingsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return EmbeddingsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
+        model: Union[str, EmbeddingModel],
+        dimensions: int | NotGiven = NOT_GIVEN,
+        encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> CreateEmbeddingResponse:
+        """
+        Creates an embedding vector representing the input text.
+
+        Args:
+          input: Input text to embed, encoded as a string or array of tokens. To embed multiple
+              inputs in a single request, pass an array of strings or array of token arrays.
+              The input must not exceed the max input tokens for the model (8192 tokens for
+              `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
+              dimensions or less.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens. Some models may also impose a limit on total number of
+              tokens summed across inputs.
+
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          dimensions: The number of dimensions the resulting output embeddings should have. Only
+              supported in `text-embedding-3` and later models.
+
+          encoding_format: The format to return the embeddings in. Can be either `float` or
+              [`base64`](https://pypi.org/project/pybase64/).
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        params = {
+            "input": input,
+            "model": model,
+            "user": user,
+            "dimensions": dimensions,
+            "encoding_format": encoding_format,
+        }
+        if not is_given(encoding_format):
+            params["encoding_format"] = "base64"
+
+        def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
+            if is_given(encoding_format):
+                # don't modify the response object if a user explicitly asked for a format
+                return obj
+
+            for embedding in obj.data:
+                data = cast(object, embedding.embedding)
+                if not isinstance(data, str):
+                    continue
+                if not has_numpy():
+                    # use array for base64 optimisation
+                    embedding.embedding = array.array("f", base64.b64decode(data)).tolist()
+                else:
+                    embedding.embedding = np.frombuffer(  # type: ignore[no-untyped-call]
+                        base64.b64decode(data), dtype="float32"
+                    ).tolist()
+
+            return obj
+
+        return self._post(
+            "/embeddings",
+            body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            cast_to=CreateEmbeddingResponse,
+        )
+
+
+class AsyncEmbeddings(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncEmbeddingsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncEmbeddingsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
+        model: Union[str, EmbeddingModel],
+        dimensions: int | NotGiven = NOT_GIVEN,
+        encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> CreateEmbeddingResponse:
+        """
+        Creates an embedding vector representing the input text.
+
+        Args:
+          input: Input text to embed, encoded as a string or array of tokens. To embed multiple
+              inputs in a single request, pass an array of strings or array of token arrays.
+              The input must not exceed the max input tokens for the model (8192 tokens for
+              `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
+              dimensions or less.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens. Some models may also impose a limit on total number of
+              tokens summed across inputs.
+
+          model: ID of the model to use. You can use the
+              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+              see all of your available models, or see our
+              [Model overview](https://platform.openai.com/docs/models) for descriptions of
+              them.
+
+          dimensions: The number of dimensions the resulting output embeddings should have. Only
+              supported in `text-embedding-3` and later models.
+
+          encoding_format: The format to return the embeddings in. Can be either `float` or
+              [`base64`](https://pypi.org/project/pybase64/).
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        params = {
+            "input": input,
+            "model": model,
+            "user": user,
+            "dimensions": dimensions,
+            "encoding_format": encoding_format,
+        }
+        if not is_given(encoding_format):
+            params["encoding_format"] = "base64"
+
+        def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
+            if is_given(encoding_format):
+                # don't modify the response object if a user explicitly asked for a format
+                return obj
+
+            for embedding in obj.data:
+                data = cast(object, embedding.embedding)
+                if not isinstance(data, str):
+                    continue
+                if not has_numpy():
+                    # use array for base64 optimisation
+                    embedding.embedding = array.array("f", base64.b64decode(data)).tolist()
+                else:
+                    embedding.embedding = np.frombuffer(  # type: ignore[no-untyped-call]
+                        base64.b64decode(data), dtype="float32"
+                    ).tolist()
+
+            return obj
+
+        return await self._post(
+            "/embeddings",
+            body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            cast_to=CreateEmbeddingResponse,
+        )
+
+
+class EmbeddingsWithRawResponse:
+    def __init__(self, embeddings: Embeddings) -> None:
+        self._embeddings = embeddings
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            embeddings.create,
+        )
+
+
+class AsyncEmbeddingsWithRawResponse:
+    def __init__(self, embeddings: AsyncEmbeddings) -> None:
+        self._embeddings = embeddings
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            embeddings.create,
+        )
+
+
+class EmbeddingsWithStreamingResponse:
+    def __init__(self, embeddings: Embeddings) -> None:
+        self._embeddings = embeddings
+
+        self.create = to_streamed_response_wrapper(
+            embeddings.create,
+        )
+
+
+class AsyncEmbeddingsWithStreamingResponse:
+    def __init__(self, embeddings: AsyncEmbeddings) -> None:
+        self._embeddings = embeddings
+
+        self.create = async_to_streamed_response_wrapper(
+            embeddings.create,
+        )
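When `encoding_format` is omitted, the resource above silently requests `base64` and decodes the payload back to floats in its `post_parser`, using numpy when it is installed and the stdlib `array` module otherwise; passing `encoding_format` explicitly disables that post-processing. A short sketch of both paths, plus the `.with_raw_response` wrapper documented on the resource; the model name and input text are illustrative only:

    from openai import OpenAI

    client = OpenAI()

    # encoding_format omitted: the request goes out as base64 and the parser shown
    # above converts each embedding back into a list of floats.
    resp = client.embeddings.create(
        model="text-embedding-3-small",
        input="The food was delicious and the waiter was friendly.",
    )
    print(len(resp.data[0].embedding))  # a list of floats

    # encoding_format given explicitly: the response is returned as-is, so the
    # `embedding` field holds the raw base64 string instead of a float list.
    raw = client.embeddings.create(
        model="text-embedding-3-small",
        input="Same text, undecoded payload",
        encoding_format="base64",
    )
    print(type(raw.data[0].embedding))

    # .with_raw_response exposes the HTTP response (headers, status code) and
    # defers parsing until .parse() is called.
    http_resp = client.embeddings.with_raw_response.create(
        model="text-embedding-3-small",
        input="headers example",
    )
    print(http_resp.headers.get("x-request-id"))
    parsed = http_resp.parse()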
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/files.py b/.venv/lib/python3.12/site-packages/openai/resources/files.py
new file mode 100644
index 00000000..2eaa4a64
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/files.py
@@ -0,0 +1,767 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import time
+import typing_extensions
+from typing import Mapping, cast
+from typing_extensions import Literal
+
+import httpx
+
+from .. import _legacy_response
+from ..types import FilePurpose, file_list_params, file_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from .._utils import (
+    extract_files,
+    maybe_transform,
+    deepcopy_minimal,
+    async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    StreamedBinaryAPIResponse,
+    AsyncStreamedBinaryAPIResponse,
+    to_streamed_response_wrapper,
+    async_to_streamed_response_wrapper,
+    to_custom_streamed_response_wrapper,
+    async_to_custom_streamed_response_wrapper,
+)
+from ..pagination import SyncCursorPage, AsyncCursorPage
+from .._base_client import AsyncPaginator, make_request_options
+from ..types.file_object import FileObject
+from ..types.file_deleted import FileDeleted
+from ..types.file_purpose import FilePurpose
+
+__all__ = ["Files", "AsyncFiles"]
+
+
+class Files(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> FilesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return FilesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> FilesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return FilesWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        purpose: FilePurpose,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FileObject:
+        """Upload a file that can be used across various endpoints.
+
+        Individual files can be up to 512 MB, and the size of all files uploaded by
+        one organization can be up to 100 GB.
+
+        The Assistants API supports files up to 2 million tokens and of specific file
+        types. See the
+        [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
+        details.
+
+        The Fine-tuning API only supports `.jsonl` files. The input also has certain
+        required formats for fine-tuning
+        [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
+        [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+        models.
+
+        The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
+        has a specific required
+        [format](https://platform.openai.com/docs/api-reference/batch/request-input).
+
+        Please [contact us](https://help.openai.com/) if you need to increase these
+        storage limits.
+
+        Args:
+          file: The File object (not file name) to be uploaded.
+
+          purpose: The intended purpose of the uploaded file. One of:
+              - `assistants`: Used in the Assistants API
+              - `batch`: Used in the Batch API
+              - `fine-tune`: Used for fine-tuning
+              - `vision`: Images used for vision fine-tuning
+              - `user_data`: Flexible file type for any purpose
+              - `evals`: Used for eval data sets
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "purpose": purpose,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(
+            "/files",
+            body=maybe_transform(body, file_create_params.FileCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileObject,
+        )
+
+    def retrieve(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FileObject:
+        """
+        Returns information about a specific file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        return self._get(
+            f"/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileObject,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        purpose: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[FileObject]:
+        """Returns a list of files.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              10,000, and the default is 10,000.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          purpose: Only return files with the given purpose.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/files",
+            page=SyncCursorPage[FileObject],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "order": order,
+                        "purpose": purpose,
+                    },
+                    file_list_params.FileListParams,
+                ),
+            ),
+            model=FileObject,
+        )
+
+    def delete(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FileDeleted:
+        """
+        Delete a file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        return self._delete(
+            f"/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileDeleted,
+        )
+
+    def content(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> _legacy_response.HttpxBinaryResponseContent:
+        """
+        Returns the contents of the specified file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+        return self._get(
+            f"/files/{file_id}/content",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
+        )
+
+    @typing_extensions.deprecated("The `.content()` method should be used instead")
+    def retrieve_content(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> str:
+        """
+        Returns the contents of the specified file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        return self._get(
+            f"/files/{file_id}/content",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=str,
+        )
+
+    def wait_for_processing(
+        self,
+        id: str,
+        *,
+        poll_interval: float = 5.0,
+        max_wait_seconds: float = 30 * 60,
+    ) -> FileObject:
+        """Waits for the given file to be processed, default timeout is 30 mins."""
+        TERMINAL_STATES = {"processed", "error", "deleted"}
+
+        start = time.time()
+        file = self.retrieve(id)
+        while file.status not in TERMINAL_STATES:
+            self._sleep(poll_interval)
+
+            file = self.retrieve(id)
+            if time.time() - start > max_wait_seconds:
+                raise RuntimeError(
+                    f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
+                )
+
+        return file
+
+
+class AsyncFiles(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncFilesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncFilesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncFilesWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        purpose: FilePurpose,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FileObject:
+        """Upload a file that can be used across various endpoints.
+
+        Individual files can be
+        up to 512 MB, and the size of all files uploaded by one organization can be up
+        to 100 GB.
+
+        The Assistants API supports files up to 2 million tokens and of specific file
+        types. See the
+        [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
+        details.
+
+        The Fine-tuning API only supports `.jsonl` files. The input also has certain
+        required formats for fine-tuning
+        [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
+        [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+        models.
+
+        The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
+        has a specific required
+        [format](https://platform.openai.com/docs/api-reference/batch/request-input).
+
+        Please [contact us](https://help.openai.com/) if you need to increase these
+        storage limits.
+
+        Args:
+          file: The File object (not file name) to be uploaded.
+
+          purpose: The intended purpose of the uploaded file. One of:
+              - `assistants`: Used in the Assistants API
+              - `batch`: Used in the Batch API
+              - `fine-tune`: Used for fine-tuning
+              - `vision`: Images used for vision fine-tuning
+              - `user_data`: Flexible file type for any purpose
+              - `evals`: Used for eval data sets
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "purpose": purpose,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            "/files",
+            body=await async_maybe_transform(body, file_create_params.FileCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileObject,
+        )
+
+    async def retrieve(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FileObject:
+        """
+        Returns information about a specific file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        return await self._get(
+            f"/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileObject,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        purpose: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]:
+        """Returns a list of files.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              10,000, and the default is 10,000.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          purpose: Only return files with the given purpose.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/files",
+            page=AsyncCursorPage[FileObject],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "order": order,
+                        "purpose": purpose,
+                    },
+                    file_list_params.FileListParams,
+                ),
+            ),
+            model=FileObject,
+        )
+
+    async def delete(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FileDeleted:
+        """
+        Delete a file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        return await self._delete(
+            f"/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileDeleted,
+        )
+
+    async def content(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> _legacy_response.HttpxBinaryResponseContent:
+        """
+        Returns the contents of the specified file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+        return await self._get(
+            f"/files/{file_id}/content",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
+        )
+
+    @typing_extensions.deprecated("The `.content()` method should be used instead")
+    async def retrieve_content(
+        self,
+        file_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> str:
+        """
+        Returns the contents of the specified file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        return await self._get(
+            f"/files/{file_id}/content",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=str,
+        )
+
+    async def wait_for_processing(
+        self,
+        id: str,
+        *,
+        poll_interval: float = 5.0,
+        max_wait_seconds: float = 30 * 60,
+    ) -> FileObject:
+        """Waits for the given file to be processed, default timeout is 30 mins."""
+        TERMINAL_STATES = {"processed", "error", "deleted"}
+
+        start = time.time()
+        file = await self.retrieve(id)
+        while file.status not in TERMINAL_STATES:
+            await self._sleep(poll_interval)
+
+            file = await self.retrieve(id)
+            if time.time() - start > max_wait_seconds:
+                raise RuntimeError(
+                    f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
+                )
+
+        return file
+
+
+class FilesWithRawResponse:
+    def __init__(self, files: Files) -> None:
+        self._files = files
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            files.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            files.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            files.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            files.delete,
+        )
+        self.content = _legacy_response.to_raw_response_wrapper(
+            files.content,
+        )
+        self.retrieve_content = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                files.retrieve_content  # pyright: ignore[reportDeprecated],
+            )
+        )
+
+
+class AsyncFilesWithRawResponse:
+    def __init__(self, files: AsyncFiles) -> None:
+        self._files = files
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            files.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            files.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            files.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            files.delete,
+        )
+        self.content = _legacy_response.async_to_raw_response_wrapper(
+            files.content,
+        )
+        self.retrieve_content = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                files.retrieve_content  # pyright: ignore[reportDeprecated],
+            )
+        )
+
+
+class FilesWithStreamingResponse:
+    def __init__(self, files: Files) -> None:
+        self._files = files
+
+        self.create = to_streamed_response_wrapper(
+            files.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            files.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            files.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            files.delete,
+        )
+        self.content = to_custom_streamed_response_wrapper(
+            files.content,
+            StreamedBinaryAPIResponse,
+        )
+        self.retrieve_content = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                files.retrieve_content  # pyright: ignore[reportDeprecated],
+            )
+        )
+
+
+class AsyncFilesWithStreamingResponse:
+    def __init__(self, files: AsyncFiles) -> None:
+        self._files = files
+
+        self.create = async_to_streamed_response_wrapper(
+            files.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            files.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            files.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            files.delete,
+        )
+        self.content = async_to_custom_streamed_response_wrapper(
+            files.content,
+            AsyncStreamedBinaryAPIResponse,
+        )
+        self.retrieve_content = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                files.retrieve_content  # pyright: ignore[reportDeprecated],
+            )
+        )
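
For orientation, the Files resource above is normally driven through a configured client. A minimal usage sketch, assuming an `OpenAI` client with an API key in the environment and a local `mydata.jsonl` file (both illustrative, not part of this diff):

    from openai import OpenAI

    client = OpenAI()  # picks up OPENAI_API_KEY from the environment

    # Upload a JSONL file for fine-tuning; the body is sent as multipart/form-data.
    with open("mydata.jsonl", "rb") as f:
        uploaded = client.files.create(file=f, purpose="fine-tune")

    # Block until the file reaches a terminal state ("processed", "error", "deleted").
    client.files.wait_for_processing(uploaded.id)

    # Iterate the cursor-paginated listing; further pages are fetched lazily.
    for file in client.files.list(purpose="fine-tune", limit=100):
        print(file.id, file.status)

    # Download the stored bytes via the legacy binary response object.
    client.files.content(uploaded.id).write_to_file("downloaded.jsonl")
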
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/__init__.py
new file mode 100644
index 00000000..7765231f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .jobs import (
+    Jobs,
+    AsyncJobs,
+    JobsWithRawResponse,
+    AsyncJobsWithRawResponse,
+    JobsWithStreamingResponse,
+    AsyncJobsWithStreamingResponse,
+)
+from .fine_tuning import (
+    FineTuning,
+    AsyncFineTuning,
+    FineTuningWithRawResponse,
+    AsyncFineTuningWithRawResponse,
+    FineTuningWithStreamingResponse,
+    AsyncFineTuningWithStreamingResponse,
+)
+
+__all__ = [
+    "Jobs",
+    "AsyncJobs",
+    "JobsWithRawResponse",
+    "AsyncJobsWithRawResponse",
+    "JobsWithStreamingResponse",
+    "AsyncJobsWithStreamingResponse",
+    "FineTuning",
+    "AsyncFineTuning",
+    "FineTuningWithRawResponse",
+    "AsyncFineTuningWithRawResponse",
+    "FineTuningWithStreamingResponse",
+    "AsyncFineTuningWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/fine_tuning.py b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/fine_tuning.py
new file mode 100644
index 00000000..eebde07d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/fine_tuning.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from .jobs.jobs import (
+    Jobs,
+    AsyncJobs,
+    JobsWithRawResponse,
+    AsyncJobsWithRawResponse,
+    JobsWithStreamingResponse,
+    AsyncJobsWithStreamingResponse,
+)
+from ..._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["FineTuning", "AsyncFineTuning"]
+
+
+class FineTuning(SyncAPIResource):
+    @cached_property
+    def jobs(self) -> Jobs:
+        return Jobs(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> FineTuningWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return FineTuningWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> FineTuningWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return FineTuningWithStreamingResponse(self)
+
+
+class AsyncFineTuning(AsyncAPIResource):
+    @cached_property
+    def jobs(self) -> AsyncJobs:
+        return AsyncJobs(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncFineTuningWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncFineTuningWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncFineTuningWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncFineTuningWithStreamingResponse(self)
+
+
+class FineTuningWithRawResponse:
+    def __init__(self, fine_tuning: FineTuning) -> None:
+        self._fine_tuning = fine_tuning
+
+    @cached_property
+    def jobs(self) -> JobsWithRawResponse:
+        return JobsWithRawResponse(self._fine_tuning.jobs)
+
+
+class AsyncFineTuningWithRawResponse:
+    def __init__(self, fine_tuning: AsyncFineTuning) -> None:
+        self._fine_tuning = fine_tuning
+
+    @cached_property
+    def jobs(self) -> AsyncJobsWithRawResponse:
+        return AsyncJobsWithRawResponse(self._fine_tuning.jobs)
+
+
+class FineTuningWithStreamingResponse:
+    def __init__(self, fine_tuning: FineTuning) -> None:
+        self._fine_tuning = fine_tuning
+
+    @cached_property
+    def jobs(self) -> JobsWithStreamingResponse:
+        return JobsWithStreamingResponse(self._fine_tuning.jobs)
+
+
+class AsyncFineTuningWithStreamingResponse:
+    def __init__(self, fine_tuning: AsyncFineTuning) -> None:
+        self._fine_tuning = fine_tuning
+
+    @cached_property
+    def jobs(self) -> AsyncJobsWithStreamingResponse:
+        return AsyncJobsWithStreamingResponse(self._fine_tuning.jobs)
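
The wrapper classes above are what back the `.with_raw_response` and `.with_streaming_response` accessors described in the docstrings throughout this diff. A hedged sketch of how they are used, assuming an existing `client` and the placeholder file ID `file-abc123`:

    from openai import OpenAI

    client = OpenAI()

    # `.with_raw_response` still parses the body but also exposes HTTP details.
    raw = client.fine_tuning.jobs.with_raw_response.list(limit=1)
    print(raw.headers.get("x-request-id"))
    jobs_page = raw.parse()  # the usual SyncCursorPage[FineTuningJob]

    # `.with_streaming_response` defers reading the body until it is consumed.
    with client.files.with_streaming_response.content("file-abc123") as response:
        response.stream_to_file("output.jsonl")
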
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/__init__.py
new file mode 100644
index 00000000..94cd1fb7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .jobs import (
+    Jobs,
+    AsyncJobs,
+    JobsWithRawResponse,
+    AsyncJobsWithRawResponse,
+    JobsWithStreamingResponse,
+    AsyncJobsWithStreamingResponse,
+)
+from .checkpoints import (
+    Checkpoints,
+    AsyncCheckpoints,
+    CheckpointsWithRawResponse,
+    AsyncCheckpointsWithRawResponse,
+    CheckpointsWithStreamingResponse,
+    AsyncCheckpointsWithStreamingResponse,
+)
+
+__all__ = [
+    "Checkpoints",
+    "AsyncCheckpoints",
+    "CheckpointsWithRawResponse",
+    "AsyncCheckpointsWithRawResponse",
+    "CheckpointsWithStreamingResponse",
+    "AsyncCheckpointsWithStreamingResponse",
+    "Jobs",
+    "AsyncJobs",
+    "JobsWithRawResponse",
+    "AsyncJobsWithRawResponse",
+    "JobsWithStreamingResponse",
+    "AsyncJobsWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/checkpoints.py b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/checkpoints.py
new file mode 100644
index 00000000..f86462e5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/checkpoints.py
@@ -0,0 +1,199 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import (
+    AsyncPaginator,
+    make_request_options,
+)
+from ....types.fine_tuning.jobs import checkpoint_list_params
+from ....types.fine_tuning.jobs.fine_tuning_job_checkpoint import FineTuningJobCheckpoint
+
+__all__ = ["Checkpoints", "AsyncCheckpoints"]
+
+
+class Checkpoints(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> CheckpointsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return CheckpointsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return CheckpointsWithStreamingResponse(self)
+
+    def list(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[FineTuningJobCheckpoint]:
+        """
+        List checkpoints for a fine-tuning job.
+
+        Args:
+          after: Identifier for the last checkpoint ID from the previous pagination request.
+
+          limit: Number of checkpoints to retrieve.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return self._get_api_list(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
+            page=SyncCursorPage[FineTuningJobCheckpoint],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    checkpoint_list_params.CheckpointListParams,
+                ),
+            ),
+            model=FineTuningJobCheckpoint,
+        )
+
+
+class AsyncCheckpoints(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCheckpointsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncCheckpointsWithStreamingResponse(self)
+
+    def list(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[FineTuningJobCheckpoint, AsyncCursorPage[FineTuningJobCheckpoint]]:
+        """
+        List checkpoints for a fine-tuning job.
+
+        Args:
+          after: Identifier for the last checkpoint ID from the previous pagination request.
+
+          limit: Number of checkpoints to retrieve.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return self._get_api_list(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
+            page=AsyncCursorPage[FineTuningJobCheckpoint],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    checkpoint_list_params.CheckpointListParams,
+                ),
+            ),
+            model=FineTuningJobCheckpoint,
+        )
+
+
+class CheckpointsWithRawResponse:
+    def __init__(self, checkpoints: Checkpoints) -> None:
+        self._checkpoints = checkpoints
+
+        self.list = _legacy_response.to_raw_response_wrapper(
+            checkpoints.list,
+        )
+
+
+class AsyncCheckpointsWithRawResponse:
+    def __init__(self, checkpoints: AsyncCheckpoints) -> None:
+        self._checkpoints = checkpoints
+
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            checkpoints.list,
+        )
+
+
+class CheckpointsWithStreamingResponse:
+    def __init__(self, checkpoints: Checkpoints) -> None:
+        self._checkpoints = checkpoints
+
+        self.list = to_streamed_response_wrapper(
+            checkpoints.list,
+        )
+
+
+class AsyncCheckpointsWithStreamingResponse:
+    def __init__(self, checkpoints: AsyncCheckpoints) -> None:
+        self._checkpoints = checkpoints
+
+        self.list = async_to_streamed_response_wrapper(
+            checkpoints.list,
+        )
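
As with the other list endpoints, checkpoint listing flows through the cursor-paginated page types imported above. A minimal sketch, assuming an existing `client` and the placeholder job ID `ftjob-abc123`:

    from openai import OpenAI

    client = OpenAI()

    # Iterating the returned SyncCursorPage walks every page of checkpoints.
    for checkpoint in client.fine_tuning.jobs.checkpoints.list("ftjob-abc123", limit=10):
        print(checkpoint.step_number, checkpoint.fine_tuned_model_checkpoint)
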
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/jobs.py b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/jobs.py
new file mode 100644
index 00000000..bbeff60b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/fine_tuning/jobs/jobs.py
@@ -0,0 +1,761 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ...._compat import cached_property
+from .checkpoints import (
+    Checkpoints,
+    AsyncCheckpoints,
+    CheckpointsWithRawResponse,
+    AsyncCheckpointsWithRawResponse,
+    CheckpointsWithStreamingResponse,
+    AsyncCheckpointsWithStreamingResponse,
+)
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import (
+    AsyncPaginator,
+    make_request_options,
+)
+from ....types.fine_tuning import job_list_params, job_create_params, job_list_events_params
+from ....types.shared_params.metadata import Metadata
+from ....types.fine_tuning.fine_tuning_job import FineTuningJob
+from ....types.fine_tuning.fine_tuning_job_event import FineTuningJobEvent
+
+__all__ = ["Jobs", "AsyncJobs"]
+
+
+class Jobs(SyncAPIResource):
+    @cached_property
+    def checkpoints(self) -> Checkpoints:
+        return Checkpoints(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> JobsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return JobsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> JobsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return JobsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
+        training_file: str,
+        hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
+        integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        method: job_create_params.Method | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        validation_file: Optional[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FineTuningJob:
+        """
+        Creates a fine-tuning job which begins the process of creating a new model from
+        a given dataset.
+
+        Response includes details of the enqueued job including job status and the name
+        of the fine-tuned models once complete.
+
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+
+        Args:
+          model: The name of the model to fine-tune. You can select one of the
+              [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
+
+          training_file: The ID of an uploaded file that contains training data.
+
+              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+              for how to upload a file.
+
+              Your dataset must be formatted as a JSONL file. Additionally, you must upload
+              your file with the purpose `fine-tune`.
+
+              The contents of the file should differ depending on whether the model uses the
+              [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
+              [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+              format, or if the fine-tuning method uses the
+              [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
+              format.
+
+              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              for more details.
+
+          hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
+              in favor of `method`, and should be passed in under the `method` parameter.
+
+          integrations: A list of integrations to enable for your fine-tuning job.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          method: The method used for fine-tuning.
+
+          seed: The seed controls the reproducibility of the job. Passing in the same seed and
+              job parameters should produce the same results, but may differ in rare cases. If
+              a seed is not specified, one will be generated for you.
+
+          suffix: A string of up to 64 characters that will be added to your fine-tuned model
+              name.
+
+              For example, a `suffix` of "custom-model-name" would produce a model name like
+              `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
+
+          validation_file: The ID of an uploaded file that contains validation data.
+
+              If you provide this file, the data is used to generate validation metrics
+              periodically during fine-tuning. These metrics can be viewed in the fine-tuning
+              results file. The same data should not be present in both train and validation
+              files.
+
+              Your dataset must be formatted as a JSONL file. You must upload your file with
+              the purpose `fine-tune`.
+
+              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              for more details.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/fine_tuning/jobs",
+            body=maybe_transform(
+                {
+                    "model": model,
+                    "training_file": training_file,
+                    "hyperparameters": hyperparameters,
+                    "integrations": integrations,
+                    "metadata": metadata,
+                    "method": method,
+                    "seed": seed,
+                    "suffix": suffix,
+                    "validation_file": validation_file,
+                },
+                job_create_params.JobCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FineTuningJob,
+        )
+
+    def retrieve(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FineTuningJob:
+        """
+        Get info about a fine-tuning job.
+
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return self._get(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FineTuningJob,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[FineTuningJob]:
+        """
+        List your organization's fine-tuning jobs
+
+        Args:
+          after: Identifier for the last job from the previous pagination request.
+
+          limit: Number of fine-tuning jobs to retrieve.
+
+          metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
+              Alternatively, set `metadata=null` to indicate no metadata.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/fine_tuning/jobs",
+            page=SyncCursorPage[FineTuningJob],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "metadata": metadata,
+                    },
+                    job_list_params.JobListParams,
+                ),
+            ),
+            model=FineTuningJob,
+        )
+
+    def cancel(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FineTuningJob:
+        """
+        Immediately cancel a fine-tune job.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return self._post(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FineTuningJob,
+        )
+
+    def list_events(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[FineTuningJobEvent]:
+        """
+        Get status updates for a fine-tuning job.
+
+        Args:
+          after: Identifier for the last event from the previous pagination request.
+
+          limit: Number of events to retrieve.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return self._get_api_list(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
+            page=SyncCursorPage[FineTuningJobEvent],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    job_list_events_params.JobListEventsParams,
+                ),
+            ),
+            model=FineTuningJobEvent,
+        )
+
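# Hedged usage sketch (editorial, not part of the generated file): the synchronous
# Jobs resource above is typically driven like this; the client construction,
# training file ID, and suffix are illustrative assumptions.
from openai import OpenAI

client = OpenAI()

# Kick off a job; `training_file` must be an uploaded file with purpose `fine-tune`.
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",
    suffix="my-experiment",
)

# Re-fetch the job and page through its recent status events.
job = client.fine_tuning.jobs.retrieve(job.id)
for event in client.fine_tuning.jobs.list_events(job.id, limit=20):
    print(event.created_at, event.message)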
+
+class AsyncJobs(AsyncAPIResource):
+    @cached_property
+    def checkpoints(self) -> AsyncCheckpoints:
+        return AsyncCheckpoints(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncJobsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncJobsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncJobsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncJobsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
+        training_file: str,
+        hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
+        integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        method: job_create_params.Method | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        validation_file: Optional[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FineTuningJob:
+        """
+        Creates a fine-tuning job which begins the process of creating a new model from
+        a given dataset.
+
+        Response includes details of the enqueued job including job status and the name
+        of the fine-tuned models once complete.
+
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+
+        Args:
+          model: The name of the model to fine-tune. You can select one of the
+              [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
+
+          training_file: The ID of an uploaded file that contains training data.
+
+              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+              for how to upload a file.
+
+              Your dataset must be formatted as a JSONL file. Additionally, you must upload
+              your file with the purpose `fine-tune`.
+
+              The contents of the file should differ depending on if the model uses the
+              [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
+              [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+              format, or if the fine-tuning method uses the
+              [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
+              format.
+
+              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              for more details.
+
+          hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
+              in favor of `method`, and should be passed in under the `method` parameter.
+
+          integrations: A list of integrations to enable for your fine-tuning job.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          method: The method used for fine-tuning.
+
+          seed: The seed controls the reproducibility of the job. Passing in the same seed and
+              job parameters should produce the same results, but may differ in rare cases. If
+              a seed is not specified, one will be generated for you.
+
+          suffix: A string of up to 64 characters that will be added to your fine-tuned model
+              name.
+
+              For example, a `suffix` of "custom-model-name" would produce a model name like
+              `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
+
+          validation_file: The ID of an uploaded file that contains validation data.
+
+              If you provide this file, the data is used to generate validation metrics
+              periodically during fine-tuning. These metrics can be viewed in the fine-tuning
+              results file. The same data should not be present in both train and validation
+              files.
+
+              Your dataset must be formatted as a JSONL file. You must upload your file with
+              the purpose `fine-tune`.
+
+              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              for more details.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/fine_tuning/jobs",
+            body=await async_maybe_transform(
+                {
+                    "model": model,
+                    "training_file": training_file,
+                    "hyperparameters": hyperparameters,
+                    "integrations": integrations,
+                    "metadata": metadata,
+                    "method": method,
+                    "seed": seed,
+                    "suffix": suffix,
+                    "validation_file": validation_file,
+                },
+                job_create_params.JobCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FineTuningJob,
+        )
+
+    async def retrieve(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FineTuningJob:
+        """
+        Get info about a fine-tuning job.
+
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return await self._get(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FineTuningJob,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[FineTuningJob, AsyncCursorPage[FineTuningJob]]:
+        """
+        List your organization's fine-tuning jobs.
+
+        Args:
+          after: Identifier for the last job from the previous pagination request.
+
+          limit: Number of fine-tuning jobs to retrieve.
+
+          metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
+              Alternatively, set `metadata=null` to indicate no metadata.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/fine_tuning/jobs",
+            page=AsyncCursorPage[FineTuningJob],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "metadata": metadata,
+                    },
+                    job_list_params.JobListParams,
+                ),
+            ),
+            model=FineTuningJob,
+        )
+
+    async def cancel(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> FineTuningJob:
+        """
+        Immediately cancel a fine-tune job.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return await self._post(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FineTuningJob,
+        )
+
+    def list_events(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[FineTuningJobEvent, AsyncCursorPage[FineTuningJobEvent]]:
+        """
+        Get status updates for a fine-tuning job.
+
+        Args:
+          after: Identifier for the last event from the previous pagination request.
+
+          limit: Number of events to retrieve.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not fine_tuning_job_id:
+            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
+        return self._get_api_list(
+            f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
+            page=AsyncCursorPage[FineTuningJobEvent],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    job_list_events_params.JobListEventsParams,
+                ),
+            ),
+            model=FineTuningJobEvent,
+        )
+
+
+class JobsWithRawResponse:
+    def __init__(self, jobs: Jobs) -> None:
+        self._jobs = jobs
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            jobs.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            jobs.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            jobs.list,
+        )
+        self.cancel = _legacy_response.to_raw_response_wrapper(
+            jobs.cancel,
+        )
+        self.list_events = _legacy_response.to_raw_response_wrapper(
+            jobs.list_events,
+        )
+
+    @cached_property
+    def checkpoints(self) -> CheckpointsWithRawResponse:
+        return CheckpointsWithRawResponse(self._jobs.checkpoints)
+
+
+class AsyncJobsWithRawResponse:
+    def __init__(self, jobs: AsyncJobs) -> None:
+        self._jobs = jobs
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            jobs.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            jobs.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            jobs.list,
+        )
+        self.cancel = _legacy_response.async_to_raw_response_wrapper(
+            jobs.cancel,
+        )
+        self.list_events = _legacy_response.async_to_raw_response_wrapper(
+            jobs.list_events,
+        )
+
+    @cached_property
+    def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
+        return AsyncCheckpointsWithRawResponse(self._jobs.checkpoints)
+
+
+class JobsWithStreamingResponse:
+    def __init__(self, jobs: Jobs) -> None:
+        self._jobs = jobs
+
+        self.create = to_streamed_response_wrapper(
+            jobs.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            jobs.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            jobs.list,
+        )
+        self.cancel = to_streamed_response_wrapper(
+            jobs.cancel,
+        )
+        self.list_events = to_streamed_response_wrapper(
+            jobs.list_events,
+        )
+
+    @cached_property
+    def checkpoints(self) -> CheckpointsWithStreamingResponse:
+        return CheckpointsWithStreamingResponse(self._jobs.checkpoints)
+
+
+class AsyncJobsWithStreamingResponse:
+    def __init__(self, jobs: AsyncJobs) -> None:
+        self._jobs = jobs
+
+        self.create = async_to_streamed_response_wrapper(
+            jobs.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            jobs.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            jobs.list,
+        )
+        self.cancel = async_to_streamed_response_wrapper(
+            jobs.cancel,
+        )
+        self.list_events = async_to_streamed_response_wrapper(
+            jobs.list_events,
+        )
+
+    @cached_property
+    def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
+        return AsyncCheckpointsWithStreamingResponse(self._jobs.checkpoints)
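
A minimal sketch of driving the jobs resource above through the public client, assuming an `OpenAI` client configured via the `OPENAI_API_KEY` environment variable; the training-file ID and model name are placeholders:

    from openai import OpenAI

    client = OpenAI()  # picks up OPENAI_API_KEY from the environment

    # Kick off a job from an already-uploaded JSONL file (placeholder file ID).
    job = client.fine_tuning.jobs.create(
        model="gpt-4o-mini",
        training_file="file-abc123",
    )

    # Check on the job and walk its most recent events.
    job = client.fine_tuning.jobs.retrieve(job.id)
    for event in client.fine_tuning.jobs.list_events(job.id, limit=10):
        print(event.message)

    # Cancel it if it is no longer needed.
    client.fine_tuning.jobs.cancel(job.id)

The `SyncCursorPage` returned by `list_events` is iterable, so the loop above pages through events without manual cursor handling.
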
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/images.py b/.venv/lib/python3.12/site-packages/openai/resources/images.py
new file mode 100644
index 00000000..30473c14
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/images.py
@@ -0,0 +1,600 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Mapping, Optional, cast
+from typing_extensions import Literal
+
+import httpx
+
+from .. import _legacy_response
+from ..types import image_edit_params, image_generate_params, image_create_variation_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from .._utils import (
+    extract_files,
+    maybe_transform,
+    deepcopy_minimal,
+    async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .._base_client import make_request_options
+from ..types.image_model import ImageModel
+from ..types.images_response import ImagesResponse
+
+__all__ = ["Images", "AsyncImages"]
+
+
+class Images(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ImagesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ImagesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ImagesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ImagesWithStreamingResponse(self)
+
+    def create_variation(
+        self,
+        *,
+        image: FileTypes,
+        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ImagesResponse:
+        """
+        Creates a variation of a given image.
+
+        Args:
+          image: The image to use as the basis for the variation(s). Must be a valid PNG file,
+              less than 4MB, and square.
+
+          model: The model to use for image generation. Only `dall-e-2` is supported at this
+              time.
+
+          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+              `n=1` is supported.
+
+          response_format: The format in which the generated images are returned. Must be one of `url` or
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
+
+          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
+              `1024x1024`.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "image": image,
+                "model": model,
+                "n": n,
+                "response_format": response_format,
+                "size": size,
+                "user": user,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(
+            "/images/variations",
+            body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ImagesResponse,
+        )
+
+    def edit(
+        self,
+        *,
+        image: FileTypes,
+        prompt: str,
+        mask: FileTypes | NotGiven = NOT_GIVEN,
+        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ImagesResponse:
+        """
+        Creates an edited or extended image given an original image and a prompt.
+
+        Args:
+          image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
+              is not provided, image must have transparency, which will be used as the mask.
+
+          prompt: A text description of the desired image(s). The maximum length is 1000
+              characters.
+
+          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
+              indicate where `image` should be edited. Must be a valid PNG file, less than
+              4MB, and have the same dimensions as `image`.
+
+          model: The model to use for image generation. Only `dall-e-2` is supported at this
+              time.
+
+          n: The number of images to generate. Must be between 1 and 10.
+
+          response_format: The format in which the generated images are returned. Must be one of `url` or
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
+
+          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
+              `1024x1024`.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "image": image,
+                "prompt": prompt,
+                "mask": mask,
+                "model": model,
+                "n": n,
+                "response_format": response_format,
+                "size": size,
+                "user": user,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(
+            "/images/edits",
+            body=maybe_transform(body, image_edit_params.ImageEditParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ImagesResponse,
+        )
+
+    def generate(
+        self,
+        *,
+        prompt: str,
+        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
+        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
+        style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ImagesResponse:
+        """
+        Creates an image given a prompt.
+
+        Args:
+          prompt: A text description of the desired image(s). The maximum length is 1000
+              characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+
+          model: The model to use for image generation.
+
+          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+              `n=1` is supported.
+
+          quality: The quality of the image that will be generated. `hd` creates images with finer
+              details and greater consistency across the image. This param is only supported
+              for `dall-e-3`.
+
+          response_format: The format in which the generated images are returned. Must be one of `url` or
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
+
+          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
+              `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
+              `1024x1792` for `dall-e-3` models.
+
+          style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
+              causes the model to lean towards generating hyper-real and dramatic images.
+              Natural causes the model to produce more natural, less hyper-real looking
+              images. This param is only supported for `dall-e-3`.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/images/generations",
+            body=maybe_transform(
+                {
+                    "prompt": prompt,
+                    "model": model,
+                    "n": n,
+                    "quality": quality,
+                    "response_format": response_format,
+                    "size": size,
+                    "style": style,
+                    "user": user,
+                },
+                image_generate_params.ImageGenerateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ImagesResponse,
+        )
+
+
+class AsyncImages(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncImagesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncImagesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncImagesWithStreamingResponse(self)
+
+    async def create_variation(
+        self,
+        *,
+        image: FileTypes,
+        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ImagesResponse:
+        """
+        Creates a variation of a given image.
+
+        Args:
+          image: The image to use as the basis for the variation(s). Must be a valid PNG file,
+              less than 4MB, and square.
+
+          model: The model to use for image generation. Only `dall-e-2` is supported at this
+              time.
+
+          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+              `n=1` is supported.
+
+          response_format: The format in which the generated images are returned. Must be one of `url` or
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
+
+          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
+              `1024x1024`.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "image": image,
+                "model": model,
+                "n": n,
+                "response_format": response_format,
+                "size": size,
+                "user": user,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            "/images/variations",
+            body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ImagesResponse,
+        )
+
+    async def edit(
+        self,
+        *,
+        image: FileTypes,
+        prompt: str,
+        mask: FileTypes | NotGiven = NOT_GIVEN,
+        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ImagesResponse:
+        """
+        Creates an edited or extended image given an original image and a prompt.
+
+        Args:
+          image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
+              is not provided, image must have transparency, which will be used as the mask.
+
+          prompt: A text description of the desired image(s). The maximum length is 1000
+              characters.
+
+          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
+              indicate where `image` should be edited. Must be a valid PNG file, less than
+              4MB, and have the same dimensions as `image`.
+
+          model: The model to use for image generation. Only `dall-e-2` is supported at this
+              time.
+
+          n: The number of images to generate. Must be between 1 and 10.
+
+          response_format: The format in which the generated images are returned. Must be one of `url` or
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
+
+          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
+              `1024x1024`.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "image": image,
+                "prompt": prompt,
+                "mask": mask,
+                "model": model,
+                "n": n,
+                "response_format": response_format,
+                "size": size,
+                "user": user,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            "/images/edits",
+            body=await async_maybe_transform(body, image_edit_params.ImageEditParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ImagesResponse,
+        )
+
+    async def generate(
+        self,
+        *,
+        prompt: str,
+        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
+        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
+        style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ImagesResponse:
+        """
+        Creates an image given a prompt.
+
+        Args:
+          prompt: A text description of the desired image(s). The maximum length is 1000
+              characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+
+          model: The model to use for image generation.
+
+          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+              `n=1` is supported.
+
+          quality: The quality of the image that will be generated. `hd` creates images with finer
+              details and greater consistency across the image. This param is only supported
+              for `dall-e-3`.
+
+          response_format: The format in which the generated images are returned. Must be one of `url` or
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
+
+          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
+              `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
+              `1024x1792` for `dall-e-3` models.
+
+          style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
+              causes the model to lean towards generating hyper-real and dramatic images.
+              Natural causes the model to produce more natural, less hyper-real looking
+              images. This param is only supported for `dall-e-3`.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/images/generations",
+            body=await async_maybe_transform(
+                {
+                    "prompt": prompt,
+                    "model": model,
+                    "n": n,
+                    "quality": quality,
+                    "response_format": response_format,
+                    "size": size,
+                    "style": style,
+                    "user": user,
+                },
+                image_generate_params.ImageGenerateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ImagesResponse,
+        )
+
+
+class ImagesWithRawResponse:
+    def __init__(self, images: Images) -> None:
+        self._images = images
+
+        self.create_variation = _legacy_response.to_raw_response_wrapper(
+            images.create_variation,
+        )
+        self.edit = _legacy_response.to_raw_response_wrapper(
+            images.edit,
+        )
+        self.generate = _legacy_response.to_raw_response_wrapper(
+            images.generate,
+        )
+
+
+class AsyncImagesWithRawResponse:
+    def __init__(self, images: AsyncImages) -> None:
+        self._images = images
+
+        self.create_variation = _legacy_response.async_to_raw_response_wrapper(
+            images.create_variation,
+        )
+        self.edit = _legacy_response.async_to_raw_response_wrapper(
+            images.edit,
+        )
+        self.generate = _legacy_response.async_to_raw_response_wrapper(
+            images.generate,
+        )
+
+
+class ImagesWithStreamingResponse:
+    def __init__(self, images: Images) -> None:
+        self._images = images
+
+        self.create_variation = to_streamed_response_wrapper(
+            images.create_variation,
+        )
+        self.edit = to_streamed_response_wrapper(
+            images.edit,
+        )
+        self.generate = to_streamed_response_wrapper(
+            images.generate,
+        )
+
+
+class AsyncImagesWithStreamingResponse:
+    def __init__(self, images: AsyncImages) -> None:
+        self._images = images
+
+        self.create_variation = async_to_streamed_response_wrapper(
+            images.create_variation,
+        )
+        self.edit = async_to_streamed_response_wrapper(
+            images.edit,
+        )
+        self.generate = async_to_streamed_response_wrapper(
+            images.generate,
+        )
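
A short usage sketch for the images resource above; the prompt is arbitrary, and the async variant reaches the `AsyncImages` class through `AsyncOpenAI`:

    import asyncio

    from openai import OpenAI, AsyncOpenAI

    client = OpenAI()

    # Generate one image and print the short-lived URL from the response.
    result = client.images.generate(
        model="dall-e-3",
        prompt="a watercolor lighthouse at dusk",
        size="1024x1024",
    )
    print(result.data[0].url)

    # The same call against the asynchronous client.
    async def main() -> None:
        aclient = AsyncOpenAI()
        aresult = await aclient.images.generate(
            model="dall-e-3",
            prompt="a watercolor lighthouse at dusk",
        )
        print(aresult.data[0].url)

    asyncio.run(main())
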
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/models.py b/.venv/lib/python3.12/site-packages/openai/resources/models.py
new file mode 100644
index 00000000..a9693a6b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/models.py
@@ -0,0 +1,306 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .. import _legacy_response
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..pagination import SyncPage, AsyncPage
+from ..types.model import Model
+from .._base_client import (
+    AsyncPaginator,
+    make_request_options,
+)
+from ..types.model_deleted import ModelDeleted
+
+__all__ = ["Models", "AsyncModels"]
+
+
+class Models(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ModelsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ModelsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ModelsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ModelsWithStreamingResponse(self)
+
+    def retrieve(
+        self,
+        model: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Model:
+        """
+        Retrieves a model instance, providing basic information about the model such as
+        the owner and permissioning.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not model:
+            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
+        return self._get(
+            f"/models/{model}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Model,
+        )
+
+    def list(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncPage[Model]:
+        """
+        Lists the currently available models, and provides basic information about each
+        one such as the owner and availability.
+        """
+        return self._get_api_list(
+            "/models",
+            page=SyncPage[Model],
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=Model,
+        )
+
+    def delete(
+        self,
+        model: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelDeleted:
+        """Delete a fine-tuned model.
+
+        You must have the Owner role in your organization to
+        delete a model.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not model:
+            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
+        return self._delete(
+            f"/models/{model}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModelDeleted,
+        )
+
+
+class AsyncModels(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncModelsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncModelsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncModelsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncModelsWithStreamingResponse(self)
+
+    async def retrieve(
+        self,
+        model: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Model:
+        """
+        Retrieves a model instance, providing basic information about the model such as
+        the owner and permissioning.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not model:
+            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
+        return await self._get(
+            f"/models/{model}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Model,
+        )
+
+    def list(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[Model, AsyncPage[Model]]:
+        """
+        Lists the currently available models, and provides basic information about each
+        one such as the owner and availability.
+        """
+        return self._get_api_list(
+            "/models",
+            page=AsyncPage[Model],
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=Model,
+        )
+
+    async def delete(
+        self,
+        model: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelDeleted:
+        """Delete a fine-tuned model.
+
+        You must have the Owner role in your organization to
+        delete a model.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not model:
+            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
+        return await self._delete(
+            f"/models/{model}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModelDeleted,
+        )
+
+
+class ModelsWithRawResponse:
+    def __init__(self, models: Models) -> None:
+        self._models = models
+
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            models.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            models.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            models.delete,
+        )
+
+
+class AsyncModelsWithRawResponse:
+    def __init__(self, models: AsyncModels) -> None:
+        self._models = models
+
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            models.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            models.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            models.delete,
+        )
+
+
+class ModelsWithStreamingResponse:
+    def __init__(self, models: Models) -> None:
+        self._models = models
+
+        self.retrieve = to_streamed_response_wrapper(
+            models.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            models.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            models.delete,
+        )
+
+
+class AsyncModelsWithStreamingResponse:
+    def __init__(self, models: AsyncModels) -> None:
+        self._models = models
+
+        self.retrieve = async_to_streamed_response_wrapper(
+            models.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            models.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            models.delete,
+        )
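
A brief sketch of the models resource above, including the `.with_raw_response` variant for reading response headers; the fine-tuned model ID passed to `delete` is a placeholder:

    from openai import OpenAI

    client = OpenAI()

    # Pages are iterable, so this walks every available model.
    for model in client.models.list():
        print(model.id, model.owned_by)

    # Retrieve one model, then make the same call with raw response access.
    print(client.models.retrieve("gpt-4o-mini"))
    raw = client.models.with_raw_response.retrieve("gpt-4o-mini")
    print(raw.headers.get("x-request-id"))
    print(raw.parse().id)

    # Deleting is only valid for fine-tuned models you own (placeholder ID).
    deleted = client.models.delete("ft:gpt-4o-mini:acme::abc123")
    print(deleted.deleted)
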
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/moderations.py b/.venv/lib/python3.12/site-packages/openai/resources/moderations.py
new file mode 100644
index 00000000..a8f03142
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/moderations.py
@@ -0,0 +1,200 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+
+import httpx
+
+from .. import _legacy_response
+from ..types import moderation_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .._base_client import make_request_options
+from ..types.moderation_model import ModerationModel
+from ..types.moderation_create_response import ModerationCreateResponse
+from ..types.moderation_multi_modal_input_param import ModerationMultiModalInputParam
+
+__all__ = ["Moderations", "AsyncModerations"]
+
+
+class Moderations(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ModerationsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ModerationsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ModerationsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ModerationsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]],
+        model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModerationCreateResponse:
+        """Classifies if text and/or image inputs are potentially harmful.
+
+        Learn more in
+        the [moderation guide](https://platform.openai.com/docs/guides/moderation).
+
+        Args:
+          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
+              an array of multi-modal input objects similar to other models.
+
+          model: The content moderation model you would like to use. Learn more in
+              [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
+              learn about available models
+              [here](https://platform.openai.com/docs/models#moderation).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/moderations",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                },
+                moderation_create_params.ModerationCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModerationCreateResponse,
+        )
+
+
+class AsyncModerations(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncModerationsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncModerationsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncModerationsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]],
+        model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModerationCreateResponse:
+        """Classifies if text and/or image inputs are potentially harmful.
+
+        Learn more in
+        the [moderation guide](https://platform.openai.com/docs/guides/moderation).
+
+        Args:
+          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
+              an array of multi-modal input objects similar to other models.
+
+          model: The content moderation model you would like to use. Learn more in
+              [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
+              learn about available models
+              [here](https://platform.openai.com/docs/models#moderation).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/moderations",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                },
+                moderation_create_params.ModerationCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModerationCreateResponse,
+        )
+
+
+class ModerationsWithRawResponse:
+    def __init__(self, moderations: Moderations) -> None:
+        self._moderations = moderations
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            moderations.create,
+        )
+
+
+class AsyncModerationsWithRawResponse:
+    def __init__(self, moderations: AsyncModerations) -> None:
+        self._moderations = moderations
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            moderations.create,
+        )
+
+
+class ModerationsWithStreamingResponse:
+    def __init__(self, moderations: Moderations) -> None:
+        self._moderations = moderations
+
+        self.create = to_streamed_response_wrapper(
+            moderations.create,
+        )
+
+
+class AsyncModerationsWithStreamingResponse:
+    def __init__(self, moderations: AsyncModerations) -> None:
+        self._moderations = moderations
+
+        self.create = async_to_streamed_response_wrapper(
+            moderations.create,
+        )
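The `Moderations` resource above exposes a single `create` method on both the sync and async clients. A brief sketch of a typical call (the model name is illustrative; see the moderation guide for currently available models):

from openai import OpenAI

client = OpenAI()

result = client.moderations.create(
    model="omni-moderation-latest",  # illustrative model name
    input="I want to return this product because it arrived damaged.",
)
# Each input gets a corresponding entry in `results`.
print(result.results[0].flagged)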
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/responses/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/responses/__init__.py
new file mode 100644
index 00000000..ad19218b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/responses/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .responses import (
+    Responses,
+    AsyncResponses,
+    ResponsesWithRawResponse,
+    AsyncResponsesWithRawResponse,
+    ResponsesWithStreamingResponse,
+    AsyncResponsesWithStreamingResponse,
+)
+from .input_items import (
+    InputItems,
+    AsyncInputItems,
+    InputItemsWithRawResponse,
+    AsyncInputItemsWithRawResponse,
+    InputItemsWithStreamingResponse,
+    AsyncInputItemsWithStreamingResponse,
+)
+
+__all__ = [
+    "InputItems",
+    "AsyncInputItems",
+    "InputItemsWithRawResponse",
+    "AsyncInputItemsWithRawResponse",
+    "InputItemsWithStreamingResponse",
+    "AsyncInputItemsWithStreamingResponse",
+    "Responses",
+    "AsyncResponses",
+    "ResponsesWithRawResponse",
+    "AsyncResponsesWithRawResponse",
+    "ResponsesWithStreamingResponse",
+    "AsyncResponsesWithStreamingResponse",
+]
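This package re-exports the `Responses` resource and its `InputItems` sub-resource, which the client surfaces as `client.responses` and `client.responses.input_items`. A minimal sketch of the plain (non-streaming) create call implemented in the files that follow (model name illustrative):

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",  # illustrative model name
    input="Write a one-sentence summary of the Responses API.",
)
# `output_text` aggregates the text content of the response's output items.
print(response.output_text)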
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/responses/input_items.py b/.venv/lib/python3.12/site-packages/openai/resources/responses/input_items.py
new file mode 100644
index 00000000..e341393c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/responses/input_items.py
@@ -0,0 +1,223 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.responses import input_item_list_params
+from ...types.responses.response_item import ResponseItem
+
+__all__ = ["InputItems", "AsyncInputItems"]
+
+
+class InputItems(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> InputItemsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return InputItemsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> InputItemsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return InputItemsWithStreamingResponse(self)
+
+    def list(
+        self,
+        response_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[ResponseItem]:
+        """
+        Returns a list of input items for a given response.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          before: An item ID to list items before, used in pagination.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: The order to return the input items in. Default is `asc`.
+
+              - `asc`: Return the input items in ascending order.
+              - `desc`: Return the input items in descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get_api_list(
+            f"/responses/{response_id}/input_items",
+            page=SyncCursorPage[ResponseItem],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    input_item_list_params.InputItemListParams,
+                ),
+            ),
+            model=cast(Any, ResponseItem),  # Union types cannot be passed in as arguments in the type system
+        )
+
+
+class AsyncInputItems(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncInputItemsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncInputItemsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncInputItemsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncInputItemsWithStreamingResponse(self)
+
+    def list(
+        self,
+        response_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[ResponseItem, AsyncCursorPage[ResponseItem]]:
+        """
+        Returns a list of input items for a given response.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          before: An item ID to list items before, used in pagination.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: The order to return the input items in. Default is `asc`.
+
+              - `asc`: Return the input items in ascending order.
+              - `desc`: Return the input items in descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get_api_list(
+            f"/responses/{response_id}/input_items",
+            page=AsyncCursorPage[ResponseItem],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    input_item_list_params.InputItemListParams,
+                ),
+            ),
+            model=cast(Any, ResponseItem),  # Union types cannot be passed in as arguments in the type system
+        )
+
+
+class InputItemsWithRawResponse:
+    def __init__(self, input_items: InputItems) -> None:
+        self._input_items = input_items
+
+        self.list = _legacy_response.to_raw_response_wrapper(
+            input_items.list,
+        )
+
+
+class AsyncInputItemsWithRawResponse:
+    def __init__(self, input_items: AsyncInputItems) -> None:
+        self._input_items = input_items
+
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            input_items.list,
+        )
+
+
+class InputItemsWithStreamingResponse:
+    def __init__(self, input_items: InputItems) -> None:
+        self._input_items = input_items
+
+        self.list = to_streamed_response_wrapper(
+            input_items.list,
+        )
+
+
+class AsyncInputItemsWithStreamingResponse:
+    def __init__(self, input_items: AsyncInputItems) -> None:
+        self._input_items = input_items
+
+        self.list = async_to_streamed_response_wrapper(
+            input_items.list,
+        )
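`InputItems.list` returns a `SyncCursorPage` (or an `AsyncPaginator` on the async client), so iterating the result transparently follows the `after` cursor to fetch further pages. A short sketch, assuming `resp_id` names a previously created response:

from openai import OpenAI

client = OpenAI()
resp_id = "resp_123"  # placeholder ID of an existing response

# Iterating the page auto-paginates; each item is one of the ResponseItem variants.
for item in client.responses.input_items.list(resp_id, limit=20):
    print(item.type)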
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/responses/responses.py b/.venv/lib/python3.12/site-packages/openai/resources/responses/responses.py
new file mode 100644
index 00000000..668f4db8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/responses/responses.py
@@ -0,0 +1,1791 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, List, Type, Union, Iterable, Optional, cast
+from functools import partial
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._utils import (
+    is_given,
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .input_items import (
+    InputItems,
+    AsyncInputItems,
+    InputItemsWithRawResponse,
+    AsyncInputItemsWithRawResponse,
+    InputItemsWithStreamingResponse,
+    AsyncInputItemsWithStreamingResponse,
+)
+from ..._streaming import Stream, AsyncStream
+from ...lib._tools import PydanticFunctionTool, ResponsesPydanticFunctionTool
+from ..._base_client import make_request_options
+from ...types.responses import response_create_params, response_retrieve_params
+from ...lib._parsing._responses import (
+    TextFormatT,
+    parse_response,
+    type_to_text_format_param as _type_to_text_format_param,
+)
+from ...types.shared.chat_model import ChatModel
+from ...types.responses.response import Response
+from ...types.responses.tool_param import ToolParam, ParseableToolParam
+from ...types.shared_params.metadata import Metadata
+from ...types.shared_params.reasoning import Reasoning
+from ...types.responses.parsed_response import ParsedResponse
+from ...lib.streaming.responses._responses import ResponseStreamManager, AsyncResponseStreamManager
+from ...types.responses.response_includable import ResponseIncludable
+from ...types.shared_params.responses_model import ResponsesModel
+from ...types.responses.response_input_param import ResponseInputParam
+from ...types.responses.response_stream_event import ResponseStreamEvent
+from ...types.responses.response_text_config_param import ResponseTextConfigParam
+
+__all__ = ["Responses", "AsyncResponses"]
+
+
+class Responses(SyncAPIResource):
+    @cached_property
+    def input_items(self) -> InputItems:
+        return InputItems(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ResponsesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ResponsesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ResponsesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ResponsesWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: Literal[True],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: bool,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["input", "model"], ["input", "model", "stream"])
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
+        return self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+            stream=stream or False,
+            stream_cls=Stream[ResponseStreamEvent],
+        )
+
+    def stream(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ResponseStreamManager[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        api_request: partial[Stream[ResponseStreamEvent]] = partial(
+            self.create,
+            input=input,
+            model=model,
+            tools=tools,
+            include=include,
+            instructions=instructions,
+            max_output_tokens=max_output_tokens,
+            metadata=metadata,
+            parallel_tool_calls=parallel_tool_calls,
+            previous_response_id=previous_response_id,
+            store=store,
+            stream=True,
+            temperature=temperature,
+            text=text,
+            tool_choice=tool_choice,
+            reasoning=reasoning,
+            top_p=top_p,
+            truncation=truncation,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return ResponseStreamManager(
+            api_request,
+            text_format=text_format,
+            input_tools=tools,
+        )
+
+    def parse(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedResponse[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
+            return parse_response(
+                input_tools=tools,
+                text_format=text_format,
+                response=raw_response,
+            )
+
+        return self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `Response` instance into a `ParsedResponse`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
+        )
+
+    def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams),
+            ),
+            cast_to=Response,
+        )
+
+    def delete(
+        self,
+        response_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Deletes a model response with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
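+
+    # Usage sketch (illustrative only): `retrieve` and `delete` operate on the
+    # id of a previously stored response, e.g. one created with `store=True`.
+    #
+    #     resp = client.responses.create(model="gpt-4o", input="hi", store=True)
+    #     fetched = client.responses.retrieve(resp.id)
+    #     client.responses.delete(resp.id)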
+
+
+class AsyncResponses(AsyncAPIResource):
+    @cached_property
+    def input_items(self) -> AsyncInputItems:
+        return AsyncInputItems(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncResponsesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncResponsesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncResponsesWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: Literal[True],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: bool,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["input", "model"], ["input", "model", "stream"])
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
+        return await self._post(
+            "/responses",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+            stream=stream or False,
+            stream_cls=AsyncStream[ResponseStreamEvent],
+        )
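+
+    # Usage sketch (illustrative only): assuming a configured `AsyncOpenAI()`
+    # client; the `"response.output_text.delta"` event type and its `.delta`
+    # attribute are assumed from the current ResponseStreamEvent union.
+    #
+    #     from openai import AsyncOpenAI
+    #
+    #     client = AsyncOpenAI()
+    #     response = await client.responses.create(model="gpt-4o", input="hi")
+    #
+    #     stream = await client.responses.create(
+    #         model="gpt-4o", input="hi", stream=True
+    #     )
+    #     async for event in stream:
+    #         if event.type == "response.output_text.delta":
+    #             print(event.delta, end="")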
+
+    def stream(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncResponseStreamManager[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        api_request = self.create(
+            input=input,
+            model=model,
+            tools=tools,
+            include=include,
+            instructions=instructions,
+            max_output_tokens=max_output_tokens,
+            metadata=metadata,
+            parallel_tool_calls=parallel_tool_calls,
+            previous_response_id=previous_response_id,
+            store=store,
+            stream=True,
+            temperature=temperature,
+            text=text,
+            tool_choice=tool_choice,
+            reasoning=reasoning,
+            top_p=top_p,
+            truncation=truncation,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return AsyncResponseStreamManager(
+            api_request,
+            text_format=text_format,
+            input_tools=tools,
+        )
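+
+    # Usage sketch (illustrative only): the `async with` protocol and
+    # `get_final_response()` are assumed from the AsyncResponseStreamManager
+    # interface in current SDK versions.
+    #
+    #     async with client.responses.stream(
+    #         model="gpt-4o",
+    #         input="What is 6 * 7?",
+    #         text_format=MathAnswer,
+    #     ) as stream:
+    #         async for event in stream:
+    #             ...
+    #         final = await stream.get_final_response()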
+
+    async def parse(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedResponse[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
+            return parse_response(
+                input_tools=tools,
+                text_format=text_format,
+                response=raw_response,
+            )
+
+        return await self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `Response` instance into a `ParsedResponse`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
+        )
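+
+    # Usage sketch (illustrative only): the async counterpart of the sync
+    # `parse` sketch above, simply awaited.
+    #
+    #     parsed = await client.responses.parse(
+    #         model="gpt-4o", input="What is 6 * 7?", text_format=MathAnswer
+    #     )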
+
+    async def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return await self._get(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {"include": include}, response_retrieve_params.ResponseRetrieveParams
+                ),
+            ),
+            cast_to=Response,
+        )
+
+    async def delete(
+        self,
+        response_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Deletes a model response with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
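+
+    # Usage sketch (illustrative only): the async counterparts of `retrieve`
+    # and `delete`, again keyed by a stored response id.
+    #
+    #     fetched = await client.responses.retrieve(resp.id)
+    #     await client.responses.delete(resp.id)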
+
+
+class ResponsesWithRawResponse:
+    def __init__(self, responses: Responses) -> None:
+        self._responses = responses
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> InputItemsWithRawResponse:
+        return InputItemsWithRawResponse(self._responses.input_items)
+
+
+class AsyncResponsesWithRawResponse:
+    def __init__(self, responses: AsyncResponses) -> None:
+        self._responses = responses
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> AsyncInputItemsWithRawResponse:
+        return AsyncInputItemsWithRawResponse(self._responses.input_items)
+
+
+class ResponsesWithStreamingResponse:
+    def __init__(self, responses: Responses) -> None:
+        self._responses = responses
+
+        self.create = to_streamed_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = to_streamed_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> InputItemsWithStreamingResponse:
+        return InputItemsWithStreamingResponse(self._responses.input_items)
+
+
+class AsyncResponsesWithStreamingResponse:
+    def __init__(self, responses: AsyncResponses) -> None:
+        self._responses = responses
+
+        self.create = async_to_streamed_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> AsyncInputItemsWithStreamingResponse:
+        return AsyncInputItemsWithStreamingResponse(self._responses.input_items)
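+
+
+# Usage sketch (illustrative only) for the wrapper classes above; the
+# `.headers`/`.parse()` accessors and `iter_lines()` follow the raw- and
+# streaming-response patterns linked from the docstrings, and "resp_123" is a
+# placeholder id.
+#
+#     raw = client.responses.with_raw_response.retrieve("resp_123")
+#     print(raw.headers.get("x-request-id"))
+#     response = raw.parse()  # -> Response
+#
+#     with client.responses.with_streaming_response.retrieve("resp_123") as streamed:
+#         for line in streamed.iter_lines():
+#             ...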
+
+
+def _make_tools(tools: Iterable[ParseableToolParam] | NotGiven) -> List[ToolParam] | NotGiven:
+    if not is_given(tools):
+        return NOT_GIVEN
+
+    converted_tools: List[ToolParam] = []
+    for tool in tools:
+        if tool["type"] != "function":
+            converted_tools.append(tool)
+            continue
+
+        if "function" not in tool:
+            # standard Responses API case
+            converted_tools.append(tool)
+            continue
+
+        function = cast(Any, tool)["function"]  # pyright: ignore[reportUnnecessaryCast]
+        if not isinstance(function, PydanticFunctionTool):
+            raise Exception(
+                "Expected Chat Completions function tool shape to be created using `openai.pydantic_function_tool()`"
+            )
+
+        assert "parameters" in function
+        new_tool = ResponsesPydanticFunctionTool(
+            {
+                "type": "function",
+                "name": function["name"],
+                "description": function.get("description"),
+                "parameters": function["parameters"],
+                "strict": function.get("strict") or False,
+            },
+            function.model,
+        )
+
+        converted_tools.append(new_tool.cast())
+
+    return converted_tools
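+
+
+# Usage sketch (illustrative only) of the conversion handled by `_make_tools`:
+# a Chat Completions style function tool built with
+# `openai.pydantic_function_tool()` can be passed to `parse`/`stream` and is
+# rewritten into the flat Responses tool shape before the request is sent.
+#
+#     import openai
+#     from pydantic import BaseModel
+#
+#     class GetWeather(BaseModel):
+#         city: str
+#
+#     parsed = client.responses.parse(
+#         model="gpt-4o",
+#         input="What's the weather in Paris?",
+#         tools=[openai.pydantic_function_tool(GetWeather)],
+#     )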
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/responses/responses.py.orig b/.venv/lib/python3.12/site-packages/openai/resources/responses/responses.py.orig
new file mode 100644
index 00000000..dec4c193
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/responses/responses.py.orig
@@ -0,0 +1,1796 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, List, Type, Union, Iterable, Optional, cast
+from functools import partial
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._utils import (
+    is_given,
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .input_items import (
+    InputItems,
+    AsyncInputItems,
+    InputItemsWithRawResponse,
+    AsyncInputItemsWithRawResponse,
+    InputItemsWithStreamingResponse,
+    AsyncInputItemsWithStreamingResponse,
+)
+from ..._streaming import Stream, AsyncStream
+from ...lib._tools import PydanticFunctionTool, ResponsesPydanticFunctionTool
+from ..._base_client import make_request_options
+from ...types.responses import response_create_params, response_retrieve_params
+<<<<<<< HEAD
+from ...lib._parsing._responses import (
+    TextFormatT,
+    parse_response,
+    type_to_text_format_param as _type_to_text_format_param,
+)
+from ...types.shared.chat_model import ChatModel
+||||||| parent of 001707b8 (feat(api): o1-pro now available through the API (#2228))
+from ...types.shared.chat_model import ChatModel
+=======
+>>>>>>> 001707b8 (feat(api): o1-pro now available through the API (#2228))
+from ...types.responses.response import Response
+from ...types.responses.tool_param import ToolParam, ParseableToolParam
+from ...types.shared_params.metadata import Metadata
+from ...types.shared_params.reasoning import Reasoning
+from ...types.responses.parsed_response import ParsedResponse
+from ...lib.streaming.responses._responses import ResponseStreamManager, AsyncResponseStreamManager
+from ...types.responses.response_includable import ResponseIncludable
+from ...types.shared_params.responses_model import ResponsesModel
+from ...types.responses.response_input_param import ResponseInputParam
+from ...types.responses.response_stream_event import ResponseStreamEvent
+from ...types.responses.response_text_config_param import ResponseTextConfigParam
+
+__all__ = ["Responses", "AsyncResponses"]
+
+
+class Responses(SyncAPIResource):
+    @cached_property
+    def input_items(self) -> InputItems:
+        return InputItems(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ResponsesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ResponsesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ResponsesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ResponsesWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: Literal[True],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: bool,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["input", "model"], ["input", "model", "stream"])
+    def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
+        return self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+            stream=stream or False,
+            stream_cls=Stream[ResponseStreamEvent],
+        )
+
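+    # Illustrative usage sketch (not part of the SDK source): how `create` is
+    # typically called for a non-streaming response. The `OpenAI` client instance
+    # and the model name are assumptions for the example, not values required by
+    # this module.
+    #
+    #     from openai import OpenAI
+    #
+    #     client = OpenAI()  # reads OPENAI_API_KEY from the environment
+    #     response = client.responses.create(
+    #         model="gpt-4o",
+    #         input="Write a one-sentence summary of the Responses API.",
+    #     )
+    #     print(response.output_text)  # aggregated text output of the response
+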
+    def stream(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ResponseStreamManager[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        api_request: partial[Stream[ResponseStreamEvent]] = partial(
+            self.create,
+            input=input,
+            model=model,
+            tools=tools,
+            include=include,
+            instructions=instructions,
+            max_output_tokens=max_output_tokens,
+            metadata=metadata,
+            parallel_tool_calls=parallel_tool_calls,
+            previous_response_id=previous_response_id,
+            store=store,
+            stream=True,
+            temperature=temperature,
+            text=text,
+            tool_choice=tool_choice,
+            reasoning=reasoning,
+            top_p=top_p,
+            truncation=truncation,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return ResponseStreamManager(
+            api_request,
+            text_format=text_format,
+            input_tools=tools,
+        )
+
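+    # Illustrative sketch of the `stream` helper above (not part of the SDK
+    # source). It assumes an `OpenAI` client named `client`; the model name and
+    # the `response.output_text.delta` event type are used purely as an example.
+    #
+    #     with client.responses.stream(
+    #         model="gpt-4o",
+    #         input="Stream a haiku about HTTP.",
+    #     ) as stream:
+    #         for event in stream:
+    #             if event.type == "response.output_text.delta":
+    #                 print(event.delta, end="", flush=True)
+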
+    def parse(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedResponse[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
+            return parse_response(
+                input_tools=tools,
+                text_format=text_format,
+                response=raw_response,
+            )
+
+        return self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `Response` instance into a `ParsedResponse`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
+        )
+
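+    # Illustrative sketch of `parse` (not part of the SDK source): `text_format`
+    # accepts a type such as a Pydantic model, which is converted into a
+    # structured-output text format by `_type_to_text_format_param` above. The
+    # `CalendarEvent` model below is a hypothetical example.
+    #
+    #     from pydantic import BaseModel
+    #
+    #     class CalendarEvent(BaseModel):
+    #         name: str
+    #         date: str
+    #
+    #     parsed = client.responses.parse(
+    #         model="gpt-4o",
+    #         input="Alice and Bob meet for coffee on Friday.",
+    #         text_format=CalendarEvent,
+    #     )
+    #     event = parsed.output_parsed  # CalendarEvent instance, if parsing succeeded
+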
+    def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams),
+            ),
+            cast_to=Response,
+        )
+
+    def delete(
+        self,
+        response_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Deletes a model response with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
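+    # Illustrative sketch of `retrieve` and `delete` (not part of the SDK source).
+    # Both operate on a response ID returned by `create`; "resp_123" below is a
+    # placeholder, not a real ID.
+    #
+    #     stored = client.responses.retrieve("resp_123")
+    #     print(stored.status)
+    #     client.responses.delete("resp_123")  # returns None on success
+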
+
+class AsyncResponses(AsyncAPIResource):
+    @cached_property
+    def input_items(self) -> AsyncInputItems:
+        return AsyncInputItems(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncResponsesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncResponsesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncResponsesWithStreamingResponse(self)
+
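+    # Illustrative sketch (not part of the SDK source): using the raw-response
+    # wrapper on the async client to inspect headers. Assumes an `AsyncOpenAI`
+    # client named `client`; the header name shown is only an example.
+    #
+    #     raw = await client.responses.with_raw_response.create(
+    #         model="gpt-4o",
+    #         input="Hello!",
+    #     )
+    #     print(raw.headers.get("x-request-id"))
+    #     response = raw.parse()  # the regular `Response` object
+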
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: Literal[True],
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
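+    # Illustrative sketch of the async streaming overload above (not part of the
+    # SDK source). With `stream=True` the awaited call resolves to an
+    # `AsyncStream[ResponseStreamEvent]` consumed with `async for`; the client
+    # and model name are assumptions for the example.
+    #
+    #     stream = await client.responses.create(
+    #         model="gpt-4o",
+    #         input="Stream a short poem.",
+    #         stream=True,
+    #     )
+    #     async for event in stream:
+    #         print(event.type)
+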
+    @overload
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        stream: bool,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
+        """Creates a model response.
+
+        Provide
+        [text](https://platform.openai.com/docs/guides/text) or
+        [image](https://platform.openai.com/docs/guides/images) inputs to generate
+        [text](https://platform.openai.com/docs/guides/text) or
+        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+        the model call your own
+        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+        built-in [tools](https://platform.openai.com/docs/guides/tools) like
+        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+        your own data as input for the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like
+                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                Learn more about
+                [built-in tools](https://platform.openai.com/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](https://platform.openai.com/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["input", "model"], ["input", "model", "stream"])
+    async def create(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: ResponsesModel,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
+        return await self._post(
+            "/responses",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+            stream=stream or False,
+            stream_cls=AsyncStream[ResponseStreamEvent],
+        )
+
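+    # A minimal usage sketch (illustrative, not part of the generated client; the
+    # model name is an assumption). Awaiting `create` returns a `Response`, while
+    # passing `stream=True` yields an `AsyncStream[ResponseStreamEvent]` instead.
+    #
+    #     client = AsyncOpenAI()
+    #     response = await client.responses.create(
+    #         model="gpt-4o",
+    #         input="Write a one-sentence summary of photosynthesis.",
+    #     )
+    #     print(response.output_text)
+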
+    def stream(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncResponseStreamManager[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        api_request = self.create(
+            input=input,
+            model=model,
+            tools=tools,
+            include=include,
+            instructions=instructions,
+            max_output_tokens=max_output_tokens,
+            metadata=metadata,
+            parallel_tool_calls=parallel_tool_calls,
+            previous_response_id=previous_response_id,
+            store=store,
+            stream=True,
+            temperature=temperature,
+            text=text,
+            tool_choice=tool_choice,
+            reasoning=reasoning,
+            top_p=top_p,
+            truncation=truncation,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return AsyncResponseStreamManager(
+            api_request,
+            text_format=text_format,
+            input_tools=tools,
+        )
+
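+    # A usage sketch (illustrative): `stream()` returns a manager intended to be used
+    # as an async context manager; the event type checked below is an assumption.
+    #
+    #     async with client.responses.stream(
+    #         model="gpt-4o",
+    #         input="Tell me a short story about the sea.",
+    #     ) as stream:
+    #         async for event in stream:
+    #             if event.type == "response.output_text.delta":
+    #                 print(event.delta, end="")
+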
+    async def parse(
+        self,
+        *,
+        input: Union[str, ResponseInputParam],
+        model: Union[str, ChatModel],
+        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        instructions: Optional[str] | NotGiven = NOT_GIVEN,
+        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
+        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedResponse[TextFormatT]:
+        if is_given(text_format):
+            if not text:
+                text = {}
+
+            if "format" in text:
+                raise TypeError("Cannot mix and match text.format with text_format")
+
+            text["format"] = _type_to_text_format_param(text_format)
+
+        tools = _make_tools(tools)
+
+        def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
+            return parse_response(
+                input_tools=tools,
+                text_format=text_format,
+                response=raw_response,
+            )
+
+        return await self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `Response` instance into a `ParsedResponse`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
+        )
+
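+    # A usage sketch (illustrative): `parse()` validates the model output against a
+    # Pydantic model supplied via `text_format`; the schema and the `output_parsed`
+    # accessor shown below are assumptions.
+    #
+    #     class MathAnswer(BaseModel):
+    #         explanation: str
+    #         answer: str
+    #
+    #     parsed = await client.responses.parse(
+    #         model="gpt-4o",
+    #         input="Solve 2x + 3 = 11 and explain your steps.",
+    #         text_format=MathAnswer,
+    #     )
+    #     print(parsed.output_parsed)
+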
+    async def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return await self._get(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {"include": include}, response_retrieve_params.ResponseRetrieveParams
+                ),
+            ),
+            cast_to=Response,
+        )
+
+    async def delete(
+        self,
+        response_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Deletes a model response with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class ResponsesWithRawResponse:
+    def __init__(self, responses: Responses) -> None:
+        self._responses = responses
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> InputItemsWithRawResponse:
+        return InputItemsWithRawResponse(self._responses.input_items)
+
+
+class AsyncResponsesWithRawResponse:
+    def __init__(self, responses: AsyncResponses) -> None:
+        self._responses = responses
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> AsyncInputItemsWithRawResponse:
+        return AsyncInputItemsWithRawResponse(self._responses.input_items)
+
+
+class ResponsesWithStreamingResponse:
+    def __init__(self, responses: Responses) -> None:
+        self._responses = responses
+
+        self.create = to_streamed_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = to_streamed_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> InputItemsWithStreamingResponse:
+        return InputItemsWithStreamingResponse(self._responses.input_items)
+
+
+class AsyncResponsesWithStreamingResponse:
+    def __init__(self, responses: AsyncResponses) -> None:
+        self._responses = responses
+
+        self.create = async_to_streamed_response_wrapper(
+            responses.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            responses.retrieve,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            responses.delete,
+        )
+
+    @cached_property
+    def input_items(self) -> AsyncInputItemsWithStreamingResponse:
+        return AsyncInputItemsWithStreamingResponse(self._responses.input_items)
+
+
+def _make_tools(tools: Iterable[ParseableToolParam] | NotGiven) -> List[ToolParam] | NotGiven:
+    if not is_given(tools):
+        return NOT_GIVEN
+
+    converted_tools: List[ToolParam] = []
+    for tool in tools:
+        if tool["type"] != "function":
+            converted_tools.append(tool)
+            continue
+
+        if "function" not in tool:
+            # standard Responses API case
+            converted_tools.append(tool)
+            continue
+
+        function = cast(Any, tool)["function"]  # pyright: ignore[reportUnnecessaryCast]
+        if not isinstance(function, PydanticFunctionTool):
+            raise Exception(
+                "Expected Chat Completions function tool shape to be created using `openai.pydantic_function_tool()`"
+            )
+
+        assert "parameters" in function
+        new_tool = ResponsesPydanticFunctionTool(
+            {
+                "type": "function",
+                "name": function["name"],
+                "description": function.get("description"),
+                "parameters": function["parameters"],
+                "strict": function.get("strict") or False,
+            },
+            function.model,
+        )
+
+        converted_tools.append(new_tool.cast())
+
+    return converted_tools
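+
+
+# A usage sketch (illustrative): `_make_tools` allows Chat Completions-style function
+# tools built with `openai.pydantic_function_tool(...)` to be passed to the `parse()`
+# and `stream()` helpers; the schema below is an assumption.
+#
+#     class GetWeather(BaseModel):
+#         city: str
+#
+#     response = await client.responses.parse(
+#         model="gpt-4o",
+#         input="What's the weather like in Paris?",
+#         tools=[openai.pydantic_function_tool(GetWeather)],
+#     )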
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/uploads/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/uploads/__init__.py
new file mode 100644
index 00000000..12d1056f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/uploads/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .parts import (
+    Parts,
+    AsyncParts,
+    PartsWithRawResponse,
+    AsyncPartsWithRawResponse,
+    PartsWithStreamingResponse,
+    AsyncPartsWithStreamingResponse,
+)
+from .uploads import (
+    Uploads,
+    AsyncUploads,
+    UploadsWithRawResponse,
+    AsyncUploadsWithRawResponse,
+    UploadsWithStreamingResponse,
+    AsyncUploadsWithStreamingResponse,
+)
+
+__all__ = [
+    "Parts",
+    "AsyncParts",
+    "PartsWithRawResponse",
+    "AsyncPartsWithRawResponse",
+    "PartsWithStreamingResponse",
+    "AsyncPartsWithStreamingResponse",
+    "Uploads",
+    "AsyncUploads",
+    "UploadsWithRawResponse",
+    "AsyncUploadsWithRawResponse",
+    "UploadsWithStreamingResponse",
+    "AsyncUploadsWithStreamingResponse",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/uploads/parts.py b/.venv/lib/python3.12/site-packages/openai/resources/uploads/parts.py
new file mode 100644
index 00000000..777469ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/uploads/parts.py
@@ -0,0 +1,210 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Mapping, cast
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
+    extract_files,
+    maybe_transform,
+    deepcopy_minimal,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..._base_client import make_request_options
+from ...types.uploads import part_create_params
+from ...types.uploads.upload_part import UploadPart
+
+__all__ = ["Parts", "AsyncParts"]
+
+
+class Parts(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> PartsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return PartsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> PartsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return PartsWithStreamingResponse(self)
+
+    def create(
+        self,
+        upload_id: str,
+        *,
+        data: FileTypes,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> UploadPart:
+        """
+        Adds a
+        [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
+        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.
+        A Part represents a chunk of bytes from the file you are trying to upload.
+
+        Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
+        maximum of 8 GB.
+
+        It is possible to add multiple Parts in parallel. You can decide the intended
+        order of the Parts when you
+        [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).
+
+        Args:
+          data: The chunk of bytes for this Part.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not upload_id:
+            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
+        body = deepcopy_minimal({"data": data})
+        files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(
+            f"/uploads/{upload_id}/parts",
+            body=maybe_transform(body, part_create_params.PartCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=UploadPart,
+        )
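+
+    # A usage sketch (illustrative): each call uploads a single chunk of at most 64 MB
+    # to an existing Upload; `upload` and `chunk` below are assumed to come from
+    # `client.uploads.create(...)` and from reading the file in 64 MB pieces.
+    #
+    #     part = client.uploads.parts.create(upload_id=upload.id, data=chunk)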
+
+
+class AsyncParts(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncPartsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncPartsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncPartsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncPartsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        upload_id: str,
+        *,
+        data: FileTypes,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> UploadPart:
+        """
+        Adds a
+        [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
+        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.
+        A Part represents a chunk of bytes from the file you are trying to upload.
+
+        Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
+        maximum of 8 GB.
+
+        It is possible to add multiple Parts in parallel. You can decide the intended
+        order of the Parts when you
+        [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).
+
+        Args:
+          data: The chunk of bytes for this Part.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not upload_id:
+            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
+        body = deepcopy_minimal({"data": data})
+        files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a `boundary` parameter, e.g.
+        # multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            f"/uploads/{upload_id}/parts",
+            body=await async_maybe_transform(body, part_create_params.PartCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=UploadPart,
+        )
+
+
+class PartsWithRawResponse:
+    def __init__(self, parts: Parts) -> None:
+        self._parts = parts
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            parts.create,
+        )
+
+
+class AsyncPartsWithRawResponse:
+    def __init__(self, parts: AsyncParts) -> None:
+        self._parts = parts
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            parts.create,
+        )
+
+
+class PartsWithStreamingResponse:
+    def __init__(self, parts: Parts) -> None:
+        self._parts = parts
+
+        self.create = to_streamed_response_wrapper(
+            parts.create,
+        )
+
+
+class AsyncPartsWithStreamingResponse:
+    def __init__(self, parts: AsyncParts) -> None:
+        self._parts = parts
+
+        self.create = async_to_streamed_response_wrapper(
+            parts.create,
+        )
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/uploads/uploads.py b/.venv/lib/python3.12/site-packages/openai/resources/uploads/uploads.py
new file mode 100644
index 00000000..9297dbc2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/uploads/uploads.py
@@ -0,0 +1,714 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import io
+import os
+import logging
+import builtins
+from typing import List, overload
+from pathlib import Path
+
+import anyio
+import httpx
+
+from ... import _legacy_response
+from .parts import (
+    Parts,
+    AsyncParts,
+    PartsWithRawResponse,
+    AsyncPartsWithRawResponse,
+    PartsWithStreamingResponse,
+    AsyncPartsWithStreamingResponse,
+)
+from ...types import FilePurpose, upload_create_params, upload_complete_params
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..._base_client import make_request_options
+from ...types.upload import Upload
+from ...types.file_purpose import FilePurpose
+
+__all__ = ["Uploads", "AsyncUploads"]
+
+
+# 64MB
+DEFAULT_PART_SIZE = 64 * 1024 * 1024
+
+log: logging.Logger = logging.getLogger(__name__)
+
+
+class Uploads(SyncAPIResource):
+    @cached_property
+    def parts(self) -> Parts:
+        return Parts(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> UploadsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return UploadsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> UploadsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return UploadsWithStreamingResponse(self)
+
+    @overload
+    def upload_file_chunked(
+        self,
+        *,
+        file: os.PathLike[str],
+        mime_type: str,
+        purpose: FilePurpose,
+        bytes: int | None = None,
+        part_size: int | None = None,
+        md5: str | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Splits a file into multiple 64MB parts and uploads them sequentially."""
+
+    @overload
+    def upload_file_chunked(
+        self,
+        *,
+        file: bytes,
+        filename: str,
+        bytes: int,
+        mime_type: str,
+        purpose: FilePurpose,
+        part_size: int | None = None,
+        md5: str | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Splits an in-memory file into multiple 64MB parts and uploads them sequentially."""
+
+    def upload_file_chunked(
+        self,
+        *,
+        file: os.PathLike[str] | bytes,
+        mime_type: str,
+        purpose: FilePurpose,
+        filename: str | None = None,
+        bytes: int | None = None,
+        part_size: int | None = None,
+        md5: str | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Splits the given file into multiple parts and uploads them sequentially.
+
+        ```py
+        from pathlib import Path
+
+        client.uploads.upload_file_chunked(
+            file=Path("my-paper.pdf"),
+            mime_type="application/pdf",
+            purpose="assistants",
+        )
+        ```
+        """
+        if isinstance(file, builtins.bytes):
+            if filename is None:
+                raise TypeError("The `filename` argument must be given for in-memory files")
+
+            if bytes is None:
+                raise TypeError("The `bytes` argument must be given for in-memory files")
+        else:
+            if not isinstance(file, Path):
+                file = Path(file)
+
+            if not filename:
+                filename = file.name
+
+            if bytes is None:
+                bytes = file.stat().st_size
+
+        upload = self.create(
+            bytes=bytes,
+            filename=filename,
+            mime_type=mime_type,
+            purpose=purpose,
+        )
+
+        part_ids: list[str] = []
+
+        if part_size is None:
+            part_size = DEFAULT_PART_SIZE
+
+        if isinstance(file, builtins.bytes):
+            buf: io.FileIO | io.BytesIO = io.BytesIO(file)
+        else:
+            buf = io.FileIO(file)
+
+        try:
+            while True:
+                data = buf.read(part_size)
+                if not data:
+                    # EOF
+                    break
+
+                part = self.parts.create(upload_id=upload.id, data=data)
+                log.info("Uploaded part %s for upload %s", part.id, upload.id)
+                part_ids.append(part.id)
+        except Exception:
+            buf.close()
+            raise
+
+        return self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5)
+
+    def create(
+        self,
+        *,
+        bytes: int,
+        filename: str,
+        mime_type: str,
+        purpose: FilePurpose,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """
+        Creates an intermediate
+        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object
+        that you can add
+        [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.
+        Currently, an Upload can accept at most 8 GB in total and expires an hour
+        after you create it.
+
+        Once you complete the Upload, we will create a
+        [File](https://platform.openai.com/docs/api-reference/files/object) object that
+        contains all the parts you uploaded. This File is usable in the rest of our
+        platform as a regular File object.
+
+        For certain `purpose` values, the correct `mime_type` must be specified. Please
+        refer to documentation for the
+        [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files).
+
+        For guidance on the proper filename extensions for each purpose, please follow
+        the documentation on
+        [creating a File](https://platform.openai.com/docs/api-reference/files/create).
+
+        Args:
+          bytes: The number of bytes in the file you are uploading.
+
+          filename: The name of the file to upload.
+
+          mime_type: The MIME type of the file.
+
+              This must fall within the supported MIME types for your file purpose. See the
+              supported MIME types for assistants and vision.
+
+          purpose: The intended purpose of the uploaded file.
+
+              See the
+              [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/uploads",
+            body=maybe_transform(
+                {
+                    "bytes": bytes,
+                    "filename": filename,
+                    "mime_type": mime_type,
+                    "purpose": purpose,
+                },
+                upload_create_params.UploadCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Upload,
+        )
+
+    def cancel(
+        self,
+        upload_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Cancels the Upload.
+
+        No Parts may be added after an Upload is cancelled.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not upload_id:
+            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
+        return self._post(
+            f"/uploads/{upload_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Upload,
+        )
+
+    def complete(
+        self,
+        upload_id: str,
+        *,
+        part_ids: List[str],
+        md5: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """
+        Completes the
+        [Upload](https://platform.openai.com/docs/api-reference/uploads/object).
+
+        Within the returned Upload object, there is a nested
+        [File](https://platform.openai.com/docs/api-reference/files/object) object that
+        is ready to use in the rest of the platform.
+
+        You can specify the order of the Parts by passing in an ordered list of the Part
+        IDs.
+
+        The number of bytes uploaded upon completion must match the number of bytes
+        initially specified when creating the Upload object. No Parts may be added after
+        an Upload is completed.
+
+        Args:
+          part_ids: The ordered list of Part IDs.
+
+          md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
+              match what you expect.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not upload_id:
+            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
+        return self._post(
+            f"/uploads/{upload_id}/complete",
+            body=maybe_transform(
+                {
+                    "part_ids": part_ids,
+                    "md5": md5,
+                },
+                upload_complete_params.UploadCompleteParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Upload,
+        )
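+
+    # A manual end-to-end sketch (illustrative; the file name, MIME type, and chunk
+    # size are assumptions). `upload_file_chunked` above wraps these same three calls.
+    #
+    #     upload = client.uploads.create(
+    #         bytes=os.path.getsize("training.jsonl"),
+    #         filename="training.jsonl",
+    #         mime_type="text/jsonl",
+    #         purpose="fine-tune",
+    #     )
+    #     part_ids = []
+    #     with open("training.jsonl", "rb") as f:
+    #         while chunk := f.read(64 * 1024 * 1024):
+    #             part = client.uploads.parts.create(upload_id=upload.id, data=chunk)
+    #             part_ids.append(part.id)
+    #     upload = client.uploads.complete(upload_id=upload.id, part_ids=part_ids)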
+
+
+class AsyncUploads(AsyncAPIResource):
+    @cached_property
+    def parts(self) -> AsyncParts:
+        return AsyncParts(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncUploadsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncUploadsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncUploadsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncUploadsWithStreamingResponse(self)
+
+    @overload
+    async def upload_file_chunked(
+        self,
+        *,
+        file: os.PathLike[str],
+        mime_type: str,
+        purpose: FilePurpose,
+        bytes: int | None = None,
+        part_size: int | None = None,
+        md5: str | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Splits a file into multiple 64MB parts and uploads them sequentially."""
+
+    @overload
+    async def upload_file_chunked(
+        self,
+        *,
+        file: bytes,
+        filename: str,
+        bytes: int,
+        mime_type: str,
+        purpose: FilePurpose,
+        part_size: int | None = None,
+        md5: str | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Splits an in-memory file into multiple 64MB parts and uploads them sequentially."""
+
+    async def upload_file_chunked(
+        self,
+        *,
+        file: os.PathLike[str] | bytes,
+        mime_type: str,
+        purpose: FilePurpose,
+        filename: str | None = None,
+        bytes: int | None = None,
+        part_size: int | None = None,
+        md5: str | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Splits the given file into multiple parts and uploads them sequentially.
+
+        ```py
+        from pathlib import Path
+
+        await client.uploads.upload_file_chunked(
+            file=Path("my-paper.pdf"),
+            mime_type="application/pdf",
+            purpose="assistants",
+        )
+        ```
+        """
+        if isinstance(file, builtins.bytes):
+            if filename is None:
+                raise TypeError("The `filename` argument must be given for in-memory files")
+
+            if bytes is None:
+                raise TypeError("The `bytes` argument must be given for in-memory files")
+        else:
+            if not isinstance(file, anyio.Path):
+                file = anyio.Path(file)
+
+            if not filename:
+                filename = file.name
+
+            if bytes is None:
+                stat = await file.stat()
+                bytes = stat.st_size
+
+        upload = await self.create(
+            bytes=bytes,
+            filename=filename,
+            mime_type=mime_type,
+            purpose=purpose,
+        )
+
+        part_ids: list[str] = []
+
+        if part_size is None:
+            part_size = DEFAULT_PART_SIZE
+
+        if isinstance(file, anyio.Path):
+            fd = await file.open("rb")
+            async with fd:
+                while True:
+                    data = await fd.read(part_size)
+                    if not data:
+                        # EOF
+                        break
+
+                    part = await self.parts.create(upload_id=upload.id, data=data)
+                    log.info("Uploaded part %s for upload %s", part.id, upload.id)
+                    part_ids.append(part.id)
+        else:
+            buf = io.BytesIO(file)
+
+            try:
+                while True:
+                    data = buf.read(part_size)
+                    if not data:
+                        # EOF
+                        break
+
+                    part = await self.parts.create(upload_id=upload.id, data=data)
+                    log.info("Uploaded part %s for upload %s", part.id, upload.id)
+                    part_ids.append(part.id)
+            except Exception:
+                buf.close()
+                raise
+
+        return await self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5)
+
+    async def create(
+        self,
+        *,
+        bytes: int,
+        filename: str,
+        mime_type: str,
+        purpose: FilePurpose,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """
+        Creates an intermediate
+        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object
+        that you can add
+        [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.
+        Currently, an Upload can accept at most 8 GB in total and expires an hour
+        after you create it.
+
+        Once you complete the Upload, we will create a
+        [File](https://platform.openai.com/docs/api-reference/files/object) object that
+        contains all the parts you uploaded. This File is usable in the rest of our
+        platform as a regular File object.
+
+        For certain `purpose` values, the correct `mime_type` must be specified. Please
+        refer to documentation for the
+        [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files).
+
+        For guidance on the proper filename extensions for each purpose, please follow
+        the documentation on
+        [creating a File](https://platform.openai.com/docs/api-reference/files/create).
+
+        Args:
+          bytes: The number of bytes in the file you are uploading.
+
+          filename: The name of the file to upload.
+
+          mime_type: The MIME type of the file.
+
+              This must fall within the supported MIME types for your file purpose. See the
+              supported MIME types for assistants and vision.
+
+          purpose: The intended purpose of the uploaded file.
+
+              See the
+              [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/uploads",
+            body=await async_maybe_transform(
+                {
+                    "bytes": bytes,
+                    "filename": filename,
+                    "mime_type": mime_type,
+                    "purpose": purpose,
+                },
+                upload_create_params.UploadCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Upload,
+        )
+
+    async def cancel(
+        self,
+        upload_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """Cancels the Upload.
+
+        No Parts may be added after an Upload is cancelled.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not upload_id:
+            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
+        return await self._post(
+            f"/uploads/{upload_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Upload,
+        )
+
+    async def complete(
+        self,
+        upload_id: str,
+        *,
+        part_ids: List[str],
+        md5: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Upload:
+        """
+        Completes the
+        [Upload](https://platform.openai.com/docs/api-reference/uploads/object).
+
+        Within the returned Upload object, there is a nested
+        [File](https://platform.openai.com/docs/api-reference/files/object) object that
+        is ready to use in the rest of the platform.
+
+        You can specify the order of the Parts by passing in an ordered list of the Part
+        IDs.
+
+        The number of bytes uploaded upon completion must match the number of bytes
+        initially specified when creating the Upload object. No Parts may be added after
+        an Upload is completed.
+
+        Args:
+          part_ids: The ordered list of Part IDs.
+
+          md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
+              match what you expect.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not upload_id:
+            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
+        return await self._post(
+            f"/uploads/{upload_id}/complete",
+            body=await async_maybe_transform(
+                {
+                    "part_ids": part_ids,
+                    "md5": md5,
+                },
+                upload_complete_params.UploadCompleteParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Upload,
+        )
+
+
+class UploadsWithRawResponse:
+    def __init__(self, uploads: Uploads) -> None:
+        self._uploads = uploads
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            uploads.create,
+        )
+        self.cancel = _legacy_response.to_raw_response_wrapper(
+            uploads.cancel,
+        )
+        self.complete = _legacy_response.to_raw_response_wrapper(
+            uploads.complete,
+        )
+
+    @cached_property
+    def parts(self) -> PartsWithRawResponse:
+        return PartsWithRawResponse(self._uploads.parts)
+
+
+class AsyncUploadsWithRawResponse:
+    def __init__(self, uploads: AsyncUploads) -> None:
+        self._uploads = uploads
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            uploads.create,
+        )
+        self.cancel = _legacy_response.async_to_raw_response_wrapper(
+            uploads.cancel,
+        )
+        self.complete = _legacy_response.async_to_raw_response_wrapper(
+            uploads.complete,
+        )
+
+    @cached_property
+    def parts(self) -> AsyncPartsWithRawResponse:
+        return AsyncPartsWithRawResponse(self._uploads.parts)
+
+
+class UploadsWithStreamingResponse:
+    def __init__(self, uploads: Uploads) -> None:
+        self._uploads = uploads
+
+        self.create = to_streamed_response_wrapper(
+            uploads.create,
+        )
+        self.cancel = to_streamed_response_wrapper(
+            uploads.cancel,
+        )
+        self.complete = to_streamed_response_wrapper(
+            uploads.complete,
+        )
+
+    @cached_property
+    def parts(self) -> PartsWithStreamingResponse:
+        return PartsWithStreamingResponse(self._uploads.parts)
+
+
+class AsyncUploadsWithStreamingResponse:
+    def __init__(self, uploads: AsyncUploads) -> None:
+        self._uploads = uploads
+
+        self.create = async_to_streamed_response_wrapper(
+            uploads.create,
+        )
+        self.cancel = async_to_streamed_response_wrapper(
+            uploads.cancel,
+        )
+        self.complete = async_to_streamed_response_wrapper(
+            uploads.complete,
+        )
+
+    @cached_property
+    def parts(self) -> AsyncPartsWithStreamingResponse:
+        return AsyncPartsWithStreamingResponse(self._uploads.parts)
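A minimal usage sketch for the Uploads resource above, shown with the synchronous client (the async methods in this file mirror it with `await`); it assumes a configured `OpenAI()` client, and the total byte count, filename, and `chunks` iterable of raw bytes are illustrative placeholders:

    from openai import OpenAI

    client = OpenAI()

    # Declare the upload up front: total size, name, MIME type, and purpose.
    upload = client.uploads.create(
        bytes=total_size,            # assumed precomputed size of the full file
        filename="training.jsonl",
        mime_type="text/jsonl",
        purpose="fine-tune",
    )

    # Add Parts in order, collecting their IDs.
    part_ids = []
    for chunk in chunks:             # assumed iterable of bytes objects
        part = client.uploads.parts.create(upload.id, data=chunk)
        part_ids.append(part.id)

    # Complete with the ordered Part IDs; the md5 argument is optional.
    completed = client.uploads.complete(upload.id, part_ids=part_ids)
    print(completed.file.id)         # nested File object, ready for use
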
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py
new file mode 100644
index 00000000..96ae16c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .files import (
+    Files,
+    AsyncFiles,
+    FilesWithRawResponse,
+    AsyncFilesWithRawResponse,
+    FilesWithStreamingResponse,
+    AsyncFilesWithStreamingResponse,
+)
+from .file_batches import (
+    FileBatches,
+    AsyncFileBatches,
+    FileBatchesWithRawResponse,
+    AsyncFileBatchesWithRawResponse,
+    FileBatchesWithStreamingResponse,
+    AsyncFileBatchesWithStreamingResponse,
+)
+from .vector_stores import (
+    VectorStores,
+    AsyncVectorStores,
+    VectorStoresWithRawResponse,
+    AsyncVectorStoresWithRawResponse,
+    VectorStoresWithStreamingResponse,
+    AsyncVectorStoresWithStreamingResponse,
+)
+
+__all__ = [
+    "Files",
+    "AsyncFiles",
+    "FilesWithRawResponse",
+    "AsyncFilesWithRawResponse",
+    "FilesWithStreamingResponse",
+    "AsyncFilesWithStreamingResponse",
+    "FileBatches",
+    "AsyncFileBatches",
+    "FileBatchesWithRawResponse",
+    "AsyncFileBatchesWithRawResponse",
+    "FileBatchesWithStreamingResponse",
+    "AsyncFileBatchesWithStreamingResponse",
+    "VectorStores",
+    "AsyncVectorStores",
+    "VectorStoresWithRawResponse",
+    "AsyncVectorStoresWithRawResponse",
+    "VectorStoresWithStreamingResponse",
+    "AsyncVectorStoresWithStreamingResponse",
+]
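These re-exports are normally reached through the client rather than imported directly; a small sketch, assuming a configured `OpenAI()` client:

    from openai import OpenAI

    client = OpenAI()

    # client.vector_stores is an instance of the VectorStores resource re-exported
    # above; its .files and .file_batches attributes are the sub-resources whose
    # modules this __init__.py pulls together.
    print(type(client.vector_stores).__name__)                 # VectorStores
    print(type(client.vector_stores.file_batches).__name__)    # FileBatches
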
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py
new file mode 100644
index 00000000..9b4b64d3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/file_batches.py
@@ -0,0 +1,801 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import asyncio
+from typing import Dict, List, Iterable, Optional
+from typing_extensions import Union, Literal
+from concurrent.futures import Future, ThreadPoolExecutor, as_completed
+
+import httpx
+import sniffio
+
+from ... import _legacy_response
+from ...types import FileChunkingStrategyParam
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
+    is_given,
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.file_object import FileObject
+from ...types.vector_stores import file_batch_create_params, file_batch_list_files_params
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_stores.vector_store_file import VectorStoreFile
+from ...types.vector_stores.vector_store_file_batch import VectorStoreFileBatch
+
+__all__ = ["FileBatches", "AsyncFileBatches"]
+
+
+class FileBatches(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> FileBatchesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return FileBatchesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> FileBatchesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return FileBatchesWithStreamingResponse(self)
+
+    def create(
+        self,
+        vector_store_id: str,
+        *,
+        file_ids: List[str],
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """
+        Create a vector store file batch.
+
+        Args:
+          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+              the vector store should use. Useful for tools like `file_search` that can access
+              files.
+
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+              strategy. Only applicable if `file_ids` is non-empty.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/vector_stores/{vector_store_id}/file_batches",
+            body=maybe_transform(
+                {
+                    "file_ids": file_ids,
+                    "attributes": attributes,
+                    "chunking_strategy": chunking_strategy,
+                },
+                file_batch_create_params.FileBatchCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileBatch,
+        )
+
+    def retrieve(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """
+        Retrieves a vector store file batch.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileBatch,
+        )
+
+    def cancel(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Cancel a vector store file batch.
+
+        This attempts to cancel the processing of
+        files in this batch as soon as possible.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileBatch,
+        )
+
+    def create_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        file_ids: List[str],
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Create a vector store batch and poll until all files have been processed."""
+        batch = self.create(
+            vector_store_id=vector_store_id,
+            file_ids=file_ids,
+            chunking_strategy=chunking_strategy,
+        )
+        # TODO: don't poll unless necessary??
+        return self.poll(
+            batch.id,
+            vector_store_id=vector_store_id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    def list_files(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[VectorStoreFile]:
+        """
+        Returns a list of vector store files in a batch.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+            page=SyncCursorPage[VectorStoreFile],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "filter": filter,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    file_batch_list_files_params.FileBatchListFilesParams,
+                ),
+            ),
+            model=VectorStoreFile,
+        )
+
+    def poll(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Wait for the given file batch to be processed.
+
+        Note: this will return even if one of the files failed to process; you need to
+        check `batch.file_counts.failed` to handle this case.
+        """
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = self.with_raw_response.retrieve(
+                batch_id,
+                vector_store_id=vector_store_id,
+                extra_headers=headers,
+            )
+
+            batch = response.parse()
+            if batch.file_counts.in_progress > 0:
+                if not is_given(poll_interval_ms):
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                self._sleep(poll_interval_ms / 1000)
+                continue
+
+            return batch
+
+    def upload_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        files: Iterable[FileTypes],
+        max_concurrency: int = 5,
+        file_ids: List[str] = [],
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Uploads the given files concurrently and then creates a vector store file batch.
+
+        If you've already uploaded certain files that you want to include in this batch
+        then you can pass their IDs through the `file_ids` argument.
+
+        By default, if any file upload fails then an exception will be eagerly raised.
+
+        The number of concurrent uploads is configurable using the `max_concurrency`
+        parameter.
+
+        Note: this synchronous variant uploads the files with a thread pool; only the
+        async variant (`AsyncFileBatches.upload_and_poll`) requires `asyncio` or `trio`.
+        """
+        results: list[FileObject] = []
+
+        with ThreadPoolExecutor(max_workers=max_concurrency) as executor:
+            futures: list[Future[FileObject]] = [
+                executor.submit(
+                    self._client.files.create,
+                    file=file,
+                    purpose="assistants",
+                )
+                for file in files
+            ]
+
+        for future in as_completed(futures):
+            exc = future.exception()
+            if exc:
+                raise exc
+
+            results.append(future.result())
+
+        batch = self.create_and_poll(
+            vector_store_id=vector_store_id,
+            file_ids=[*file_ids, *(f.id for f in results)],
+            poll_interval_ms=poll_interval_ms,
+            chunking_strategy=chunking_strategy,
+        )
+        return batch
+
+
+class AsyncFileBatches(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncFileBatchesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncFileBatchesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncFileBatchesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncFileBatchesWithStreamingResponse(self)
+
+    async def create(
+        self,
+        vector_store_id: str,
+        *,
+        file_ids: List[str],
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """
+        Create a vector store file batch.
+
+        Args:
+          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+              the vector store should use. Useful for tools like `file_search` that can access
+              files.
+
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+              strategy. Only applicable if `file_ids` is non-empty.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/vector_stores/{vector_store_id}/file_batches",
+            body=await async_maybe_transform(
+                {
+                    "file_ids": file_ids,
+                    "attributes": attributes,
+                    "chunking_strategy": chunking_strategy,
+                },
+                file_batch_create_params.FileBatchCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileBatch,
+        )
+
+    async def retrieve(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """
+        Retrieves a vector store file batch.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileBatch,
+        )
+
+    async def cancel(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Cancel a vector store file batch.
+
+        This attempts to cancel the processing of
+        files in this batch as soon as possible.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileBatch,
+        )
+
+    async def create_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        file_ids: List[str],
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Create a vector store batch and poll until all files have been processed."""
+        batch = await self.create(
+            vector_store_id=vector_store_id,
+            file_ids=file_ids,
+            chunking_strategy=chunking_strategy,
+        )
+        # TODO: don't poll unless necessary??
+        return await self.poll(
+            batch.id,
+            vector_store_id=vector_store_id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    def list_files(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
+        """
+        Returns a list of vector store files in a batch.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not batch_id:
+            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+            page=AsyncCursorPage[VectorStoreFile],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "filter": filter,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    file_batch_list_files_params.FileBatchListFilesParams,
+                ),
+            ),
+            model=VectorStoreFile,
+        )
+
+    async def poll(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Wait for the given file batch to be processed.
+
+        Note: this will return even if one of the files failed to process; you need to
+        check `batch.file_counts.failed` to handle this case.
+        """
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = await self.with_raw_response.retrieve(
+                batch_id,
+                vector_store_id=vector_store_id,
+                extra_headers=headers,
+            )
+
+            batch = response.parse()
+            if batch.file_counts.in_progress > 0:
+                if not is_given(poll_interval_ms):
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                await self._sleep(poll_interval_ms / 1000)
+                continue
+
+            return batch
+
+    async def upload_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        files: Iterable[FileTypes],
+        max_concurrency: int = 5,
+        file_ids: List[str] = [],
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileBatch:
+        """Uploads the given files concurrently and then creates a vector store file batch.
+
+        If you've already uploaded certain files that you want to include in this batch
+        then you can pass their IDs through the `file_ids` argument.
+
+        By default, if any file upload fails then an exception will be eagerly raised.
+
+        The number of concurrent uploads is configurable using the `max_concurrency`
+        parameter.
+
+        Note: this method only supports `asyncio` or `trio` as the backing async
+        runtime.
+        """
+        uploaded_files: list[FileObject] = []
+
+        async_library = sniffio.current_async_library()
+
+        if async_library == "asyncio":
+
+            async def asyncio_upload_file(semaphore: asyncio.Semaphore, file: FileTypes) -> None:
+                async with semaphore:
+                    file_obj = await self._client.files.create(
+                        file=file,
+                        purpose="assistants",
+                    )
+                    uploaded_files.append(file_obj)
+
+            semaphore = asyncio.Semaphore(max_concurrency)
+
+            tasks = [asyncio_upload_file(semaphore, file) for file in files]
+
+            await asyncio.gather(*tasks)
+        elif async_library == "trio":
+            # We only import if the library is being used.
+            # We support Python 3.7, so we use an older version of trio that does not have type information
+            import trio  # type: ignore # pyright: ignore[reportMissingTypeStubs]
+
+            async def trio_upload_file(limiter: trio.CapacityLimiter, file: FileTypes) -> None:
+                async with limiter:
+                    file_obj = await self._client.files.create(
+                        file=file,
+                        purpose="assistants",
+                    )
+                    uploaded_files.append(file_obj)
+
+            limiter = trio.CapacityLimiter(max_concurrency)
+
+            async with trio.open_nursery() as nursery:
+                for file in files:
+                    nursery.start_soon(trio_upload_file, limiter, file)  # pyright: ignore [reportUnknownMemberType]
+        else:
+            raise RuntimeError(
+                f"Async runtime {async_library} is not supported yet. Only asyncio or trio is supported",
+            )
+
+        batch = await self.create_and_poll(
+            vector_store_id=vector_store_id,
+            file_ids=[*file_ids, *(f.id for f in uploaded_files)],
+            poll_interval_ms=poll_interval_ms,
+            chunking_strategy=chunking_strategy,
+        )
+        return batch
+
+
+class FileBatchesWithRawResponse:
+    def __init__(self, file_batches: FileBatches) -> None:
+        self._file_batches = file_batches
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            file_batches.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            file_batches.retrieve,
+        )
+        self.cancel = _legacy_response.to_raw_response_wrapper(
+            file_batches.cancel,
+        )
+        self.list_files = _legacy_response.to_raw_response_wrapper(
+            file_batches.list_files,
+        )
+
+
+class AsyncFileBatchesWithRawResponse:
+    def __init__(self, file_batches: AsyncFileBatches) -> None:
+        self._file_batches = file_batches
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            file_batches.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            file_batches.retrieve,
+        )
+        self.cancel = _legacy_response.async_to_raw_response_wrapper(
+            file_batches.cancel,
+        )
+        self.list_files = _legacy_response.async_to_raw_response_wrapper(
+            file_batches.list_files,
+        )
+
+
+class FileBatchesWithStreamingResponse:
+    def __init__(self, file_batches: FileBatches) -> None:
+        self._file_batches = file_batches
+
+        self.create = to_streamed_response_wrapper(
+            file_batches.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            file_batches.retrieve,
+        )
+        self.cancel = to_streamed_response_wrapper(
+            file_batches.cancel,
+        )
+        self.list_files = to_streamed_response_wrapper(
+            file_batches.list_files,
+        )
+
+
+class AsyncFileBatchesWithStreamingResponse:
+    def __init__(self, file_batches: AsyncFileBatches) -> None:
+        self._file_batches = file_batches
+
+        self.create = async_to_streamed_response_wrapper(
+            file_batches.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            file_batches.retrieve,
+        )
+        self.cancel = async_to_streamed_response_wrapper(
+            file_batches.cancel,
+        )
+        self.list_files = async_to_streamed_response_wrapper(
+            file_batches.list_files,
+        )
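A brief sketch of the batch polling helpers defined above, assuming a configured `OpenAI()` client, an existing vector store ID, and local file paths that are purely illustrative:

    from openai import OpenAI

    client = OpenAI()

    # upload_and_poll uploads the files concurrently (a thread pool in the sync
    # resource), creates the file batch, and blocks until processing finishes.
    batch = client.vector_stores.file_batches.upload_and_poll(
        "vs_123",                                   # illustrative vector store ID
        files=[open("notes.md", "rb"), open("faq.md", "rb")],
        max_concurrency=5,
    )

    # The helper returns even if individual files failed to process, so inspect
    # the per-status counts before relying on the batch.
    print(batch.status, batch.file_counts.completed, batch.file_counts.failed)
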
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py
new file mode 100644
index 00000000..7d93798a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/files.py
@@ -0,0 +1,933 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Dict, Union, Optional
+from typing_extensions import Literal, assert_never
+
+import httpx
+
+from ... import _legacy_response
+from ...types import FileChunkingStrategyParam
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ..._utils import (
+    is_given,
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.vector_stores import file_list_params, file_create_params, file_update_params
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_stores.vector_store_file import VectorStoreFile
+from ...types.vector_stores.file_content_response import FileContentResponse
+from ...types.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted
+
+__all__ = ["Files", "AsyncFiles"]
+
+
+class Files(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> FilesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return FilesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> FilesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return FilesWithStreamingResponse(self)
+
+    def create(
+        self,
+        vector_store_id: str,
+        *,
+        file_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Create a vector store file by attaching a
+        [File](https://platform.openai.com/docs/api-reference/files) to a
+        [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
+
+        Args:
+          file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+              vector store should use. Useful for tools like `file_search` that can access
+              files.
+
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+              strategy. Only applicable if `file_ids` is non-empty.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/vector_stores/{vector_store_id}/files",
+            body=maybe_transform(
+                {
+                    "file_id": file_id,
+                    "attributes": attributes,
+                    "chunking_strategy": chunking_strategy,
+                },
+                file_create_params.FileCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
+    def retrieve(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Retrieves a vector store file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
+    def update(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Update attributes on a vector store file.
+
+        Args:
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
+    def list(
+        self,
+        vector_store_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[VectorStoreFile]:
+        """
+        Returns a list of vector store files.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/files",
+            page=SyncCursorPage[VectorStoreFile],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "filter": filter,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    file_list_params.FileListParams,
+                ),
+            ),
+            model=VectorStoreFile,
+        )
+
+    def delete(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileDeleted:
+        """Delete a vector store file.
+
+        This will remove the file from the vector store but
+        the file itself will not be deleted. To delete the file, use the
+        [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+        endpoint.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._delete(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileDeleted,
+        )
+
+    def create_and_poll(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Attach a file to the given vector store and wait for it to be processed."""
+        self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy)
+
+        return self.poll(
+            file_id,
+            vector_store_id=vector_store_id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    def poll(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Wait for the vector store file to finish processing.
+
+        Note: this will return even if the file failed to process; you need to check
+        `file.last_error` and `file.status` to handle these cases.
+        """
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = self.with_raw_response.retrieve(
+                file_id,
+                vector_store_id=vector_store_id,
+                extra_headers=headers,
+            )
+
+            file = response.parse()
+            if file.status == "in_progress":
+                if not is_given(poll_interval_ms):
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                self._sleep(poll_interval_ms / 1000)
+            elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
+                return file
+            else:
+                if TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(file.status)
+                else:
+                    return file
+
+    def upload(
+        self,
+        *,
+        vector_store_id: str,
+        file: FileTypes,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Upload a file to the `files` API and then attach it to the given vector store.
+
+        Note the file will be asynchronously processed (you can use the alternative
+        polling helper method to wait for processing to complete).
+        """
+        file_obj = self._client.files.create(file=file, purpose="assistants")
+        return self.create(vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy)
+
+    def upload_and_poll(
+        self,
+        *,
+        vector_store_id: str,
+        file: FileTypes,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Add a file to a vector store and poll until processing is complete."""
+        file_obj = self._client.files.create(file=file, purpose="assistants")
+        return self.create_and_poll(
+            vector_store_id=vector_store_id,
+            file_id=file_obj.id,
+            chunking_strategy=chunking_strategy,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    def content(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncPage[FileContentResponse]:
+        """
+        Retrieve the parsed contents of a vector store file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+            page=SyncPage[FileContentResponse],
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=FileContentResponse,
+        )
+
+
+class AsyncFiles(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncFilesWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncFilesWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncFilesWithStreamingResponse(self)
+
+    async def create(
+        self,
+        vector_store_id: str,
+        *,
+        file_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Create a vector store file by attaching a
+        [File](https://platform.openai.com/docs/api-reference/files) to a
+        [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
+
+        Args:
+          file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+              vector store should use. Useful for tools like `file_search` that can access
+              files.
+
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+              strategy. Only applicable if `file_ids` is non-empty.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/vector_stores/{vector_store_id}/files",
+            body=await async_maybe_transform(
+                {
+                    "file_id": file_id,
+                    "attributes": attributes,
+                    "chunking_strategy": chunking_strategy,
+                },
+                file_create_params.FileCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
+    async def retrieve(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Retrieves a vector store file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
+    async def update(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        attributes: Optional[Dict[str, Union[str, float, bool]]],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """
+        Update attributes on a vector store file.
+
+        Args:
+          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters, booleans, or numbers.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFile,
+        )
+
+    def list(
+        self,
+        vector_store_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
+        """
+        Returns a list of vector store files.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/files",
+            page=AsyncCursorPage[VectorStoreFile],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "filter": filter,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    file_list_params.FileListParams,
+                ),
+            ),
+            model=VectorStoreFile,
+        )
+
+    async def delete(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFileDeleted:
+        """Delete a vector store file.
+
+        This will remove the file from the vector store but
+        the file itself will not be deleted. To delete the file, use the
+        [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+        endpoint.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._delete(
+            f"/vector_stores/{vector_store_id}/files/{file_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreFileDeleted,
+        )
+
+    async def create_and_poll(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Attach a file to the given vector store and wait for it to be processed."""
+        await self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy)
+
+        return await self.poll(
+            file_id,
+            vector_store_id=vector_store_id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    async def poll(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Wait for the vector store file to finish processing.
+
+        Note: this will return even if the file failed to process; you need to check
+        file.last_error and file.status to handle those cases.
+        """
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = await self.with_raw_response.retrieve(
+                file_id,
+                vector_store_id=vector_store_id,
+                extra_headers=headers,
+            )
+
+            file = response.parse()
+            if file.status == "in_progress":
+                if not is_given(poll_interval_ms):
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                await self._sleep(poll_interval_ms / 1000)
+            elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
+                return file
+            else:
+                if TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(file.status)
+                else:
+                    return file
+
+    async def upload(
+        self,
+        *,
+        vector_store_id: str,
+        file: FileTypes,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Upload a file to the `files` API and then attach it to the given vector store.
+
+        Note that the file will be processed asynchronously (you can use the
+        alternative polling helper method to wait for processing to complete).
+        """
+        file_obj = await self._client.files.create(file=file, purpose="assistants")
+        return await self.create(
+            vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy
+        )
+
+    async def upload_and_poll(
+        self,
+        *,
+        vector_store_id: str,
+        file: FileTypes,
+        poll_interval_ms: int | NotGiven = NOT_GIVEN,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreFile:
+        """Add a file to a vector store and poll until processing is complete."""
+        file_obj = await self._client.files.create(file=file, purpose="assistants")
+        return await self.create_and_poll(
+            vector_store_id=vector_store_id,
+            file_id=file_obj.id,
+            poll_interval_ms=poll_interval_ms,
+            chunking_strategy=chunking_strategy,
+        )
+
+    def content(
+        self,
+        file_id: str,
+        *,
+        vector_store_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[FileContentResponse, AsyncPage[FileContentResponse]]:
+        """
+        Retrieve the parsed contents of a vector store file.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        if not file_id:
+            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+            page=AsyncPage[FileContentResponse],
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=FileContentResponse,
+        )
+
+
+class FilesWithRawResponse:
+    def __init__(self, files: Files) -> None:
+        self._files = files
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            files.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            files.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            files.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            files.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            files.delete,
+        )
+        self.content = _legacy_response.to_raw_response_wrapper(
+            files.content,
+        )
+
+
+class AsyncFilesWithRawResponse:
+    def __init__(self, files: AsyncFiles) -> None:
+        self._files = files
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            files.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            files.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            files.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            files.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            files.delete,
+        )
+        self.content = _legacy_response.async_to_raw_response_wrapper(
+            files.content,
+        )
+
+
+class FilesWithStreamingResponse:
+    def __init__(self, files: Files) -> None:
+        self._files = files
+
+        self.create = to_streamed_response_wrapper(
+            files.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            files.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            files.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            files.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            files.delete,
+        )
+        self.content = to_streamed_response_wrapper(
+            files.content,
+        )
+
+
+class AsyncFilesWithStreamingResponse:
+    def __init__(self, files: AsyncFiles) -> None:
+        self._files = files
+
+        self.create = async_to_streamed_response_wrapper(
+            files.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            files.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            files.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            files.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            files.delete,
+        )
+        self.content = async_to_streamed_response_wrapper(
+            files.content,
+        )
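
A minimal usage sketch of the polling helpers defined above, assuming the client exposes this resource as client.vector_stores.files (as the module path suggests); the vector store ID, file path, and poll interval are placeholders for illustration, not values taken from this diff.

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Upload a local file and block until the vector store has finished indexing it.
    with open("notes.txt", "rb") as fh:  # placeholder path
        vs_file = client.vector_stores.files.upload_and_poll(
            vector_store_id="vs_123",  # placeholder vector store ID
            file=fh,
            poll_interval_ms=500,
        )

    # The poll-based helpers return even when processing fails, so check the terminal status.
    if vs_file.status != "completed":
        print("processing failed:", vs_file.last_error)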
diff --git a/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py
new file mode 100644
index 00000000..aaa6ed27
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/resources/vector_stores/vector_stores.py
@@ -0,0 +1,868 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from .files import (
+    Files,
+    AsyncFiles,
+    FilesWithRawResponse,
+    AsyncFilesWithRawResponse,
+    FilesWithStreamingResponse,
+    AsyncFilesWithStreamingResponse,
+)
+from ...types import (
+    FileChunkingStrategyParam,
+    vector_store_list_params,
+    vector_store_create_params,
+    vector_store_search_params,
+    vector_store_update_params,
+)
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from .file_batches import (
+    FileBatches,
+    AsyncFileBatches,
+    FileBatchesWithRawResponse,
+    AsyncFileBatchesWithRawResponse,
+    FileBatchesWithStreamingResponse,
+    AsyncFileBatchesWithStreamingResponse,
+)
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.vector_store import VectorStore
+from ...types.vector_store_deleted import VectorStoreDeleted
+from ...types.shared_params.metadata import Metadata
+from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
+from ...types.vector_store_search_response import VectorStoreSearchResponse
+
+__all__ = ["VectorStores", "AsyncVectorStores"]
+
+
+class VectorStores(SyncAPIResource):
+    @cached_property
+    def files(self) -> Files:
+        return Files(self._client)
+
+    @cached_property
+    def file_batches(self) -> FileBatches:
+        return FileBatches(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> VectorStoresWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return VectorStoresWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> VectorStoresWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return VectorStoresWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+        expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+        file_ids: List[str] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStore:
+        """
+        Create a vector store.
+
+        Args:
+          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+              strategy. Only applicable if `file_ids` is non-empty.
+
+          expires_after: The expiration policy for a vector store.
+
+          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+              the vector store should use. Useful for tools like `file_search` that can access
+              files.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          name: The name of the vector store.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            "/vector_stores",
+            body=maybe_transform(
+                {
+                    "chunking_strategy": chunking_strategy,
+                    "expires_after": expires_after,
+                    "file_ids": file_ids,
+                    "metadata": metadata,
+                    "name": name,
+                },
+                vector_store_create_params.VectorStoreCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStore,
+        )
+
+    def retrieve(
+        self,
+        vector_store_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStore:
+        """
+        Retrieves a vector store.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get(
+            f"/vector_stores/{vector_store_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStore,
+        )
+
+    def update(
+        self,
+        vector_store_id: str,
+        *,
+        expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        name: Optional[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStore:
+        """
+        Modifies a vector store.
+
+        Args:
+          expires_after: The expiration policy for a vector store.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          name: The name of the vector store.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            f"/vector_stores/{vector_store_id}",
+            body=maybe_transform(
+                {
+                    "expires_after": expires_after,
+                    "metadata": metadata,
+                    "name": name,
+                },
+                vector_store_update_params.VectorStoreUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStore,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[VectorStore]:
+        """Returns a list of vector stores.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            "/vector_stores",
+            page=SyncCursorPage[VectorStore],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    vector_store_list_params.VectorStoreListParams,
+                ),
+            ),
+            model=VectorStore,
+        )
+
+    def delete(
+        self,
+        vector_store_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreDeleted:
+        """
+        Delete a vector store.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._delete(
+            f"/vector_stores/{vector_store_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreDeleted,
+        )
+
+    def search(
+        self,
+        vector_store_id: str,
+        *,
+        query: Union[str, List[str]],
+        filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
+        max_num_results: int | NotGiven = NOT_GIVEN,
+        ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
+        rewrite_query: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncPage[VectorStoreSearchResponse]:
+        """
+        Search a vector store for relevant chunks based on a query and file attributes
+        filter.
+
+        Args:
+          query: A query string for a search
+
+          filters: A filter to apply based on file attributes.
+
+          max_num_results: The maximum number of results to return. This number should be between 1 and 50
+              inclusive.
+
+          ranking_options: Ranking options for search.
+
+          rewrite_query: Whether to rewrite the natural language query for vector search.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/search",
+            page=SyncPage[VectorStoreSearchResponse],
+            body=maybe_transform(
+                {
+                    "query": query,
+                    "filters": filters,
+                    "max_num_results": max_num_results,
+                    "ranking_options": ranking_options,
+                    "rewrite_query": rewrite_query,
+                },
+                vector_store_search_params.VectorStoreSearchParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=VectorStoreSearchResponse,
+            method="post",
+        )
+
+
+class AsyncVectorStores(AsyncAPIResource):
+    @cached_property
+    def files(self) -> AsyncFiles:
+        return AsyncFiles(self._client)
+
+    @cached_property
+    def file_batches(self) -> AsyncFileBatches:
+        return AsyncFileBatches(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncVectorStoresWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncVectorStoresWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncVectorStoresWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
+        expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+        file_ids: List[str] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStore:
+        """
+        Create a vector store.
+
+        Args:
+          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+              strategy. Only applicable if `file_ids` is non-empty.
+
+          expires_after: The expiration policy for a vector store.
+
+          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+              the vector store should use. Useful for tools like `file_search` that can access
+              files.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          name: The name of the vector store.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            "/vector_stores",
+            body=await async_maybe_transform(
+                {
+                    "chunking_strategy": chunking_strategy,
+                    "expires_after": expires_after,
+                    "file_ids": file_ids,
+                    "metadata": metadata,
+                    "name": name,
+                },
+                vector_store_create_params.VectorStoreCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStore,
+        )
+
+    async def retrieve(
+        self,
+        vector_store_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStore:
+        """
+        Retrieves a vector store.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._get(
+            f"/vector_stores/{vector_store_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStore,
+        )
+
+    async def update(
+        self,
+        vector_store_id: str,
+        *,
+        expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        name: Optional[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStore:
+        """
+        Modifies a vector store.
+
+        Args:
+          expires_after: The expiration policy for a vector store.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          name: The name of the vector store.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._post(
+            f"/vector_stores/{vector_store_id}",
+            body=await async_maybe_transform(
+                {
+                    "expires_after": expires_after,
+                    "metadata": metadata,
+                    "name": name,
+                },
+                vector_store_update_params.VectorStoreUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStore,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        before: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[VectorStore, AsyncCursorPage[VectorStore]]:
+        """Returns a list of vector stores.
+
+        Args:
+          after: A cursor for use in pagination. `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          before: A cursor for use in pagination. `before` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              starting with obj_foo, your subsequent call can include before=obj_foo in order
+              to fetch the previous page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+              order and `desc` for descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            "/vector_stores",
+            page=AsyncCursorPage[VectorStore],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    vector_store_list_params.VectorStoreListParams,
+                ),
+            ),
+            model=VectorStore,
+        )
+
+    async def delete(
+        self,
+        vector_store_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VectorStoreDeleted:
+        """
+        Delete a vector store.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._delete(
+            f"/vector_stores/{vector_store_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VectorStoreDeleted,
+        )
+
+    def search(
+        self,
+        vector_store_id: str,
+        *,
+        query: Union[str, List[str]],
+        filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
+        max_num_results: int | NotGiven = NOT_GIVEN,
+        ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
+        rewrite_query: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[VectorStoreSearchResponse, AsyncPage[VectorStoreSearchResponse]]:
+        """
+        Search a vector store for relevant chunks based on a query and file attributes
+        filter.
+
+        Args:
+          query: A query string for a search
+
+          filters: A filter to apply based on file attributes.
+
+          max_num_results: The maximum number of results to return. This number should be between 1 and 50
+              inclusive.
+
+          ranking_options: Ranking options for search.
+
+          rewrite_query: Whether to rewrite the natural language query for vector search.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not vector_store_id:
+            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/vector_stores/{vector_store_id}/search",
+            page=AsyncPage[VectorStoreSearchResponse],
+            body=maybe_transform(
+                {
+                    "query": query,
+                    "filters": filters,
+                    "max_num_results": max_num_results,
+                    "ranking_options": ranking_options,
+                    "rewrite_query": rewrite_query,
+                },
+                vector_store_search_params.VectorStoreSearchParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            model=VectorStoreSearchResponse,
+            method="post",
+        )
+
+
+class VectorStoresWithRawResponse:
+    def __init__(self, vector_stores: VectorStores) -> None:
+        self._vector_stores = vector_stores
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            vector_stores.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            vector_stores.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            vector_stores.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            vector_stores.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            vector_stores.delete,
+        )
+        self.search = _legacy_response.to_raw_response_wrapper(
+            vector_stores.search,
+        )
+
+    @cached_property
+    def files(self) -> FilesWithRawResponse:
+        return FilesWithRawResponse(self._vector_stores.files)
+
+    @cached_property
+    def file_batches(self) -> FileBatchesWithRawResponse:
+        return FileBatchesWithRawResponse(self._vector_stores.file_batches)
+
+
+class AsyncVectorStoresWithRawResponse:
+    def __init__(self, vector_stores: AsyncVectorStores) -> None:
+        self._vector_stores = vector_stores
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            vector_stores.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            vector_stores.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            vector_stores.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            vector_stores.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            vector_stores.delete,
+        )
+        self.search = _legacy_response.async_to_raw_response_wrapper(
+            vector_stores.search,
+        )
+
+    @cached_property
+    def files(self) -> AsyncFilesWithRawResponse:
+        return AsyncFilesWithRawResponse(self._vector_stores.files)
+
+    @cached_property
+    def file_batches(self) -> AsyncFileBatchesWithRawResponse:
+        return AsyncFileBatchesWithRawResponse(self._vector_stores.file_batches)
+
+
+class VectorStoresWithStreamingResponse:
+    def __init__(self, vector_stores: VectorStores) -> None:
+        self._vector_stores = vector_stores
+
+        self.create = to_streamed_response_wrapper(
+            vector_stores.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            vector_stores.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            vector_stores.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            vector_stores.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            vector_stores.delete,
+        )
+        self.search = to_streamed_response_wrapper(
+            vector_stores.search,
+        )
+
+    @cached_property
+    def files(self) -> FilesWithStreamingResponse:
+        return FilesWithStreamingResponse(self._vector_stores.files)
+
+    @cached_property
+    def file_batches(self) -> FileBatchesWithStreamingResponse:
+        return FileBatchesWithStreamingResponse(self._vector_stores.file_batches)
+
+
+class AsyncVectorStoresWithStreamingResponse:
+    def __init__(self, vector_stores: AsyncVectorStores) -> None:
+        self._vector_stores = vector_stores
+
+        self.create = async_to_streamed_response_wrapper(
+            vector_stores.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            vector_stores.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            vector_stores.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            vector_stores.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            vector_stores.delete,
+        )
+        self.search = async_to_streamed_response_wrapper(
+            vector_stores.search,
+        )
+
+    @cached_property
+    def files(self) -> AsyncFilesWithStreamingResponse:
+        return AsyncFilesWithStreamingResponse(self._vector_stores.files)
+
+    @cached_property
+    def file_batches(self) -> AsyncFileBatchesWithStreamingResponse:
+        return AsyncFileBatchesWithStreamingResponse(self._vector_stores.file_batches)
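The `search` method above posts to `/vector_stores/{vector_store_id}/search` and pages the results as `VectorStoreSearchResponse` items. A minimal sketch of calling it from the synchronous client, assuming the resource is exposed as `client.vector_stores` (as in recent SDK builds), that `OPENAI_API_KEY` is set in the environment, and using a hypothetical store ID:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

VECTOR_STORE_ID = "vs_example123"  # placeholder; use an ID from vector_stores.create()

# search() issues the POST shown above and returns a page object that is iterable.
results = client.vector_stores.search(
    VECTOR_STORE_ID,
    query="quarterly revenue guidance",
    max_num_results=5,
    rewrite_query=True,  # let the API rewrite the natural-language query
)

for result in results:
    print(result)
```

Iterating the returned page yields the individual search hits; the other keyword arguments (`filters`, `ranking_options`) follow the docstring above.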
diff --git a/.venv/lib/python3.12/site-packages/openai/types/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/__init__.py
new file mode 100644
index 00000000..11761534
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/__init__.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .batch import Batch as Batch
+from .image import Image as Image
+from .model import Model as Model
+from .shared import (
+    Metadata as Metadata,
+    AllModels as AllModels,
+    ChatModel as ChatModel,
+    Reasoning as Reasoning,
+    ErrorObject as ErrorObject,
+    CompoundFilter as CompoundFilter,
+    ResponsesModel as ResponsesModel,
+    ReasoningEffort as ReasoningEffort,
+    ComparisonFilter as ComparisonFilter,
+    FunctionDefinition as FunctionDefinition,
+    FunctionParameters as FunctionParameters,
+    ResponseFormatText as ResponseFormatText,
+    ResponseFormatJSONObject as ResponseFormatJSONObject,
+    ResponseFormatJSONSchema as ResponseFormatJSONSchema,
+)
+from .upload import Upload as Upload
+from .embedding import Embedding as Embedding
+from .chat_model import ChatModel as ChatModel
+from .completion import Completion as Completion
+from .moderation import Moderation as Moderation
+from .audio_model import AudioModel as AudioModel
+from .batch_error import BatchError as BatchError
+from .file_object import FileObject as FileObject
+from .image_model import ImageModel as ImageModel
+from .file_content import FileContent as FileContent
+from .file_deleted import FileDeleted as FileDeleted
+from .file_purpose import FilePurpose as FilePurpose
+from .vector_store import VectorStore as VectorStore
+from .model_deleted import ModelDeleted as ModelDeleted
+from .embedding_model import EmbeddingModel as EmbeddingModel
+from .images_response import ImagesResponse as ImagesResponse
+from .completion_usage import CompletionUsage as CompletionUsage
+from .file_list_params import FileListParams as FileListParams
+from .moderation_model import ModerationModel as ModerationModel
+from .batch_list_params import BatchListParams as BatchListParams
+from .completion_choice import CompletionChoice as CompletionChoice
+from .image_edit_params import ImageEditParams as ImageEditParams
+from .file_create_params import FileCreateParams as FileCreateParams
+from .batch_create_params import BatchCreateParams as BatchCreateParams
+from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
+from .upload_create_params import UploadCreateParams as UploadCreateParams
+from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
+from .audio_response_format import AudioResponseFormat as AudioResponseFormat
+from .image_generate_params import ImageGenerateParams as ImageGenerateParams
+from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
+from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
+from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
+from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
+from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
+from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
+from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
+from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
+from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
+from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
+from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
+from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
+from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
+from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
+from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
+from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
+from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
+from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
+from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
+from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
+from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject
+from .static_file_chunking_strategy_object_param import (
+    StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
+)
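Every name in this module is re-exported with the redundant-looking `X as X` form; that is the convention type checkers treat as an explicit re-export, so downstream code can import the public types directly from `openai.types`. A tiny illustration using names from the list above (the `id`/`status` fields come from the generated pydantic models):

```python
from openai.types import Batch, VectorStore


def summarize(batch: Batch, store: VectorStore) -> str:
    # Both are pydantic models generated from the OpenAPI spec.
    return f"batch {batch.id} is {batch.status}; store {store.id}"
```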
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/audio/__init__.py
new file mode 100644
index 00000000..396944ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/__init__.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .translation import Translation as Translation
+from .speech_model import SpeechModel as SpeechModel
+from .transcription import Transcription as Transcription
+from .transcription_word import TranscriptionWord as TranscriptionWord
+from .translation_verbose import TranslationVerbose as TranslationVerbose
+from .speech_create_params import SpeechCreateParams as SpeechCreateParams
+from .transcription_include import TranscriptionInclude as TranscriptionInclude
+from .transcription_segment import TranscriptionSegment as TranscriptionSegment
+from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose
+from .translation_create_params import TranslationCreateParams as TranslationCreateParams
+from .transcription_stream_event import TranscriptionStreamEvent as TranscriptionStreamEvent
+from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams
+from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse
+from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse
+from .transcription_text_done_event import TranscriptionTextDoneEvent as TranscriptionTextDoneEvent
+from .transcription_text_delta_event import TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py
new file mode 100644
index 00000000..95868071
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_create_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from .speech_model import SpeechModel
+
+__all__ = ["SpeechCreateParams"]
+
+
+class SpeechCreateParams(TypedDict, total=False):
+    input: Required[str]
+    """The text to generate audio for. The maximum length is 4096 characters."""
+
+    model: Required[Union[str, SpeechModel]]
+    """
+    One of the available [TTS models](https://platform.openai.com/docs/models#tts):
+    `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+    """
+
+    voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]]
+    """The voice to use when generating the audio.
+
+    Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`,
+    `sage` and `shimmer`. Previews of the voices are available in the
+    [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+    """
+
+    instructions: str
+    """Control the voice of your generated audio with additional instructions.
+
+    Does not work with `tts-1` or `tts-1-hd`.
+    """
+
+    response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
+    """The format to audio in.
+
+    Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
+    """
+
+    speed: float
+    """The speed of the generated audio.
+
+    Select a value from `0.25` to `4.0`. `1.0` is the default.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py
new file mode 100644
index 00000000..f004f805
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/speech_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["SpeechModel"]
+
+SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription.py
new file mode 100644
index 00000000..15763854
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["Transcription", "Logprob"]
+
+
+class Logprob(BaseModel):
+    token: Optional[str] = None
+    """The token in the transcription."""
+
+    bytes: Optional[List[float]] = None
+    """The bytes of the token."""
+
+    logprob: Optional[float] = None
+    """The log probability of the token."""
+
+
+class Transcription(BaseModel):
+    text: str
+    """The transcribed text."""
+
+    logprobs: Optional[List[Logprob]] = None
+    """The log probabilities of the tokens in the transcription.
+
+    Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`
+    if `logprobs` is added to the `include` array.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py
new file mode 100644
index 00000000..0cda4c79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_params.py
@@ -0,0 +1,113 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import FileTypes
+from ..audio_model import AudioModel
+from .transcription_include import TranscriptionInclude
+from ..audio_response_format import AudioResponseFormat
+
+__all__ = [
+    "TranscriptionCreateParamsBase",
+    "TranscriptionCreateParamsNonStreaming",
+    "TranscriptionCreateParamsStreaming",
+]
+
+
+class TranscriptionCreateParamsBase(TypedDict, total=False):
+    file: Required[FileTypes]
+    """
+    The audio file object (not file name) to transcribe, in one of these formats:
+    flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+    """
+
+    model: Required[Union[str, AudioModel]]
+    """ID of the model to use.
+
+    The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1`
+    (which is powered by our open source Whisper V2 model).
+    """
+
+    include: List[TranscriptionInclude]
+    """Additional information to include in the transcription response.
+
+    `logprobs` will return the log probabilities of the tokens in the response to
+    understand the model's confidence in the transcription. `logprobs` only works
+    with response_format set to `json` and only with the models `gpt-4o-transcribe`
+    and `gpt-4o-mini-transcribe`.
+    """
+
+    language: str
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    prompt: str
+    """An optional text to guide the model's style or continue a previous audio
+    segment.
+
+    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+    should match the audio language.
+    """
+
+    response_format: AudioResponseFormat
+    """
+    The format of the output, in one of these options: `json`, `text`, `srt`,
+    `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+    the only supported format is `json`.
+    """
+
+    temperature: float
+    """The sampling temperature, between 0 and 1.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. If set to 0, the model will use
+    [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+    automatically increase the temperature until certain thresholds are hit.
+    """
+
+    timestamp_granularities: List[Literal["word", "segment"]]
+    """The timestamp granularities to populate for this transcription.
+
+    `response_format` must be set to `verbose_json` to use timestamp granularities.
+    Either or both of these options are supported: `word` or `segment`. Note: There
+    is no additional latency for segment timestamps, but generating word timestamps
+    incurs additional latency.
+    """
+
+
+class TranscriptionCreateParamsNonStreaming(TranscriptionCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+    for more information.
+
+    Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+    """
+
+
+class TranscriptionCreateParamsStreaming(TranscriptionCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+    for more information.
+
+    Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+    """
+
+
+TranscriptionCreateParams = Union[TranscriptionCreateParamsNonStreaming, TranscriptionCreateParamsStreaming]
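The params are split into a non-streaming and a streaming variant so the SDK can overload the return type on `stream`. A minimal non-streaming sketch, assuming `client.audio.transcriptions.create` and a local audio file named `meeting.mp3` (a placeholder):

```python
from openai import OpenAI

client = OpenAI()

with open("meeting.mp3", "rb") as audio_file:
    transcription = client.audio.transcriptions.create(
        model="gpt-4o-transcribe",
        file=audio_file,
        response_format="json",   # the only format gpt-4o-transcribe supports
        include=["logprobs"],     # per the docstring: json only, 4o models only
        language="en",
    )

print(transcription.text)
if transcription.logprobs:
    # Find the token the model was least confident about.
    worst = min(transcription.logprobs, key=lambda lp: lp.logprob or 0.0)
    print("least confident token:", worst.token)
```

The return type depends on `response_format`: `json` produces the `Transcription` model shown earlier, while `verbose_json` produces `TranscriptionVerbose`.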
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py
new file mode 100644
index 00000000..2f7bed81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_create_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .transcription import Transcription
+from .transcription_verbose import TranscriptionVerbose
+
+__all__ = ["TranscriptionCreateResponse"]
+
+TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py
new file mode 100644
index 00000000..0e464ac9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_include.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["TranscriptionInclude"]
+
+TranscriptionInclude: TypeAlias = Literal["logprobs"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py
new file mode 100644
index 00000000..522c401e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_segment.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionSegment"]
+
+
+class TranscriptionSegment(BaseModel):
+    id: int
+    """Unique identifier of the segment."""
+
+    avg_logprob: float
+    """Average logprob of the segment.
+
+    If the value is lower than -1, consider the logprobs failed.
+    """
+
+    compression_ratio: float
+    """Compression ratio of the segment.
+
+    If the value is greater than 2.4, consider the compression failed.
+    """
+
+    end: float
+    """End time of the segment in seconds."""
+
+    no_speech_prob: float
+    """Probability of no speech in the segment.
+
+    If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this
+    segment silent.
+    """
+
+    seek: int
+    """Seek offset of the segment."""
+
+    start: float
+    """Start time of the segment in seconds."""
+
+    temperature: float
+    """Temperature parameter used for generating the segment."""
+
+    text: str
+    """Text content of the segment."""
+
+    tokens: List[int]
+    """Array of token IDs for the text content."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py
new file mode 100644
index 00000000..757077a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_stream_event.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .transcription_text_done_event import TranscriptionTextDoneEvent
+from .transcription_text_delta_event import TranscriptionTextDeltaEvent
+
+__all__ = ["TranscriptionStreamEvent"]
+
+TranscriptionStreamEvent: TypeAlias = Annotated[
+    Union[TranscriptionTextDeltaEvent, TranscriptionTextDoneEvent], PropertyInfo(discriminator="type")
+]
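`TranscriptionStreamEvent` is a discriminated union on the `type` field, so a consumer only needs to branch on that one literal. A minimal sketch of draining a transcription stream, assuming the streaming call shape (`stream=True` on `client.audio.transcriptions.create`) described by the params earlier in this diff and a placeholder input file:

```python
from openai import OpenAI

client = OpenAI()

with open("meeting.mp3", "rb") as audio_file:
    stream = client.audio.transcriptions.create(
        model="gpt-4o-mini-transcribe",  # whisper-1 does not support streaming
        file=audio_file,
        stream=True,                     # yields TranscriptionStreamEvent items
    )

    for event in stream:
        if event.type == "transcript.text.delta":
            print(event.delta, end="", flush=True)
        elif event.type == "transcript.text.done":
            print()  # event.text carries the full transcript at this point
```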
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py
new file mode 100644
index 00000000..f8d53554
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_delta_event.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionTextDeltaEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+    token: Optional[str] = None
+    """The token that was used to generate the log probability."""
+
+    bytes: Optional[List[object]] = None
+    """The bytes that were used to generate the log probability."""
+
+    logprob: Optional[float] = None
+    """The log probability of the token."""
+
+
+class TranscriptionTextDeltaEvent(BaseModel):
+    delta: str
+    """The text delta that was additionally transcribed."""
+
+    type: Literal["transcript.text.delta"]
+    """The type of the event. Always `transcript.text.delta`."""
+
+    logprobs: Optional[List[Logprob]] = None
+    """The log probabilities of the delta.
+
+    Only included if you
+    [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+    with the `include[]` parameter set to `logprobs`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py
new file mode 100644
index 00000000..3f1a713a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_text_done_event.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionTextDoneEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+    token: Optional[str] = None
+    """The token that was used to generate the log probability."""
+
+    bytes: Optional[List[object]] = None
+    """The bytes that were used to generate the log probability."""
+
+    logprob: Optional[float] = None
+    """The log probability of the token."""
+
+
+class TranscriptionTextDoneEvent(BaseModel):
+    text: str
+    """The text that was transcribed."""
+
+    type: Literal["transcript.text.done"]
+    """The type of the event. Always `transcript.text.done`."""
+
+    logprobs: Optional[List[Logprob]] = None
+    """The log probabilities of the individual tokens in the transcription.
+
+    Only included if you
+    [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+    with the `include[]` parameter set to `logprobs`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py
new file mode 100644
index 00000000..2a670189
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_verbose.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .transcription_word import TranscriptionWord
+from .transcription_segment import TranscriptionSegment
+
+__all__ = ["TranscriptionVerbose"]
+
+
+class TranscriptionVerbose(BaseModel):
+    duration: float
+    """The duration of the input audio."""
+
+    language: str
+    """The language of the input audio."""
+
+    text: str
+    """The transcribed text."""
+
+    segments: Optional[List[TranscriptionSegment]] = None
+    """Segments of the transcribed text and their corresponding details."""
+
+    words: Optional[List[TranscriptionWord]] = None
+    """Extracted words and their corresponding timestamps."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py
new file mode 100644
index 00000000..969da325
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/transcription_word.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["TranscriptionWord"]
+
+
+class TranscriptionWord(BaseModel):
+    end: float
+    """End time of the word in seconds."""
+
+    start: float
+    """Start time of the word in seconds."""
+
+    word: str
+    """The text content of the word."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation.py
new file mode 100644
index 00000000..7c0e9051
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["Translation"]
+
+
+class Translation(BaseModel):
+    text: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py
new file mode 100644
index 00000000..b23a1853
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_params.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import FileTypes
+from ..audio_model import AudioModel
+
+__all__ = ["TranslationCreateParams"]
+
+
+class TranslationCreateParams(TypedDict, total=False):
+    file: Required[FileTypes]
+    """
+    The audio file object (not file name) to translate, in one of these formats: flac,
+    mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+    """
+
+    model: Required[Union[str, AudioModel]]
+    """ID of the model to use.
+
+    Only `whisper-1` (which is powered by our open source Whisper V2 model) is
+    currently available.
+    """
+
+    prompt: str
+    """An optional text to guide the model's style or continue a previous audio
+    segment.
+
+    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+    should be in English.
+    """
+
+    response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
+    """
+    The format of the output, in one of these options: `json`, `text`, `srt`,
+    `verbose_json`, or `vtt`.
+    """
+
+    temperature: float
+    """The sampling temperature, between 0 and 1.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. If set to 0, the model will use
+    [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+    automatically increase the temperature until certain thresholds are hit.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py
new file mode 100644
index 00000000..9953813c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_create_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .translation import Translation
+from .translation_verbose import TranslationVerbose
+
+__all__ = ["TranslationCreateResponse"]
+
+TranslationCreateResponse: TypeAlias = Union[Translation, TranslationVerbose]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py
new file mode 100644
index 00000000..27cb02d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio/translation_verbose.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .transcription_segment import TranscriptionSegment
+
+__all__ = ["TranslationVerbose"]
+
+
+class TranslationVerbose(BaseModel):
+    duration: float
+    """The duration of the input audio."""
+
+    language: str
+    """The language of the output translation (always `english`)."""
+
+    text: str
+    """The translated text."""
+
+    segments: Optional[List[TranscriptionSegment]] = None
+    """Segments of the translated text and their corresponding details."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio_model.py b/.venv/lib/python3.12/site-packages/openai/types/audio_model.py
new file mode 100644
index 00000000..4d14d601
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["AudioModel"]
+
+AudioModel: TypeAlias = Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/audio_response_format.py b/.venv/lib/python3.12/site-packages/openai/types/audio_response_format.py
new file mode 100644
index 00000000..f8c8d459
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/audio_response_format.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["AudioResponseFormat"]
+
+AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py b/.venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py
new file mode 100644
index 00000000..6f17836b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/auto_file_chunking_strategy_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["AutoFileChunkingStrategyParam"]
+
+
+class AutoFileChunkingStrategyParam(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch.py b/.venv/lib/python3.12/site-packages/openai/types/batch.py
new file mode 100644
index 00000000..35de90ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch.py
@@ -0,0 +1,87 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .batch_error import BatchError
+from .shared.metadata import Metadata
+from .batch_request_counts import BatchRequestCounts
+
+__all__ = ["Batch", "Errors"]
+
+
+class Errors(BaseModel):
+    data: Optional[List[BatchError]] = None
+
+    object: Optional[str] = None
+    """The object type, which is always `list`."""
+
+
+class Batch(BaseModel):
+    id: str
+
+    completion_window: str
+    """The time frame within which the batch should be processed."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the batch was created."""
+
+    endpoint: str
+    """The OpenAI API endpoint used by the batch."""
+
+    input_file_id: str
+    """The ID of the input file for the batch."""
+
+    object: Literal["batch"]
+    """The object type, which is always `batch`."""
+
+    status: Literal[
+        "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
+    ]
+    """The current status of the batch."""
+
+    cancelled_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch was cancelled."""
+
+    cancelling_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch started cancelling."""
+
+    completed_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch was completed."""
+
+    error_file_id: Optional[str] = None
+    """The ID of the file containing the outputs of requests with errors."""
+
+    errors: Optional[Errors] = None
+
+    expired_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch expired."""
+
+    expires_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch will expire."""
+
+    failed_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch failed."""
+
+    finalizing_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch started finalizing."""
+
+    in_progress_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the batch started processing."""
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    output_file_id: Optional[str] = None
+    """The ID of the file containing the outputs of successfully executed requests."""
+
+    request_counts: Optional[BatchRequestCounts] = None
+    """The request counts for different statuses within the batch."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/batch_create_params.py
new file mode 100644
index 00000000..cc95afd3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_create_params.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .shared_params.metadata import Metadata
+
+__all__ = ["BatchCreateParams"]
+
+
+class BatchCreateParams(TypedDict, total=False):
+    completion_window: Required[Literal["24h"]]
+    """The time frame within which the batch should be processed.
+
+    Currently only `24h` is supported.
+    """
+
+    endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
+    """The endpoint to be used for all requests in the batch.
+
+    Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
+    `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
+    restricted to a maximum of 50,000 embedding inputs across all requests in the
+    batch.
+    """
+
+    input_file_id: Required[str]
+    """The ID of an uploaded file that contains requests for the new batch.
+
+    See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+    for how to upload a file.
+
+    Your input file must be formatted as a
+    [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
+    and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+    requests, and can be up to 200 MB in size.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_error.py b/.venv/lib/python3.12/site-packages/openai/types/batch_error.py
new file mode 100644
index 00000000..1cdd808d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_error.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["BatchError"]
+
+
+class BatchError(BaseModel):
+    code: Optional[str] = None
+    """An error code identifying the error type."""
+
+    line: Optional[int] = None
+    """The line number of the input file where the error occurred, if applicable."""
+
+    message: Optional[str] = None
+    """A human-readable message providing more details about the error."""
+
+    param: Optional[str] = None
+    """The name of the parameter that caused the error, if applicable."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/batch_list_params.py
new file mode 100644
index 00000000..ef5e966b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["BatchListParams"]
+
+
+class BatchListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py b/.venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py
new file mode 100644
index 00000000..7e1d49fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/batch_request_counts.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["BatchRequestCounts"]
+
+
+class BatchRequestCounts(BaseModel):
+    completed: int
+    """Number of requests that have been completed successfully."""
+
+    failed: int
+    """Number of requests that have failed."""
+
+    total: int
+    """Total number of requests in the batch."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/__init__.py
new file mode 100644
index 00000000..5ba3eadf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .thread import Thread as Thread
+from .assistant import Assistant as Assistant
+from .function_tool import FunctionTool as FunctionTool
+from .assistant_tool import AssistantTool as AssistantTool
+from .thread_deleted import ThreadDeleted as ThreadDeleted
+from .file_search_tool import FileSearchTool as FileSearchTool
+from .assistant_deleted import AssistantDeleted as AssistantDeleted
+from .function_tool_param import FunctionToolParam as FunctionToolParam
+from .assistant_tool_param import AssistantToolParam as AssistantToolParam
+from .thread_create_params import ThreadCreateParams as ThreadCreateParams
+from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams
+from .assistant_list_params import AssistantListParams as AssistantListParams
+from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice
+from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool
+from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
+from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
+from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
+from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
+from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
+from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
+from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
+from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
+from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
+from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
+from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam
+from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam
+from .assistant_response_format_option_param import (
+    AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant.py
new file mode 100644
index 00000000..58421e0f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant.py
@@ -0,0 +1,134 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .assistant_tool import AssistantTool
+from ..shared.metadata import Metadata
+from .assistant_response_format_option import AssistantResponseFormatOption
+
+__all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ToolResourcesCodeInterpreter(BaseModel):
+    file_ids: Optional[List[str]] = None
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+    available to the `code_interpreter` tool. There can be a maximum of 20 files
+    associated with the tool.
+    """
+
+
+class ToolResourcesFileSearch(BaseModel):
+    vector_store_ids: Optional[List[str]] = None
+    """
+    The ID of the
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this assistant. There can be a maximum of 1 vector store attached to
+    the assistant.
+    """
+
+
+class ToolResources(BaseModel):
+    code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+
+    file_search: Optional[ToolResourcesFileSearch] = None
+
+
+class Assistant(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the assistant was created."""
+
+    description: Optional[str] = None
+    """The description of the assistant. The maximum length is 512 characters."""
+
+    instructions: Optional[str] = None
+    """The system instructions that the assistant uses.
+
+    The maximum length is 256,000 characters.
+    """
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: str
+    """ID of the model to use.
+
+    You can use the
+    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+    see all of your available models, or see our
+    [Model overview](https://platform.openai.com/docs/models) for descriptions of
+    them.
+    """
+
+    name: Optional[str] = None
+    """The name of the assistant. The maximum length is 256 characters."""
+
+    object: Literal["assistant"]
+    """The object type, which is always `assistant`."""
+
+    tools: List[AssistantTool]
+    """A list of tool enabled on the assistant.
+
+    There can be a maximum of 128 tools per assistant. Tools can be of types
+    `code_interpreter`, `file_search`, or `function`.
+    """
+
+    response_format: Optional[AssistantResponseFormatOption] = None
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float] = None
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_resources: Optional[ToolResources] = None
+    """A set of resources that are used by the assistant's tools.
+
+    The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+    top_p: Optional[float] = None
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py
new file mode 100644
index 00000000..8b3c3318
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_create_params.py
@@ -0,0 +1,212 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared.chat_model import ChatModel
+from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = [
+    "AssistantCreateParams",
+    "ToolResources",
+    "ToolResourcesCodeInterpreter",
+    "ToolResourcesFileSearch",
+    "ToolResourcesFileSearchVectorStore",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategy",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
+]
+
+
+class AssistantCreateParams(TypedDict, total=False):
+    model: Required[Union[str, ChatModel]]
+    """ID of the model to use.
+
+    You can use the
+    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+    see all of your available models, or see our
+    [Model overview](https://platform.openai.com/docs/models) for descriptions of
+    them.
+    """
+
+    description: Optional[str]
+    """The description of the assistant. The maximum length is 512 characters."""
+
+    instructions: Optional[str]
+    """The system instructions that the assistant uses.
+
+    The maximum length is 256,000 characters.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    name: Optional[str]
+    """The name of the assistant. The maximum length is 256 characters."""
+
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    response_format: Optional[AssistantResponseFormatOptionParam]
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_resources: Optional[ToolResources]
+    """A set of resources that are used by the assistant's tools.
+
+    The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+    tools: Iterable[AssistantToolParam]
+    """A list of tool enabled on the assistant.
+
+    There can be a maximum of 128 tools per assistant. Tools can be of types
+    `code_interpreter`, `file_search`, or `function`.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+    available to the `code_interpreter` tool. There can be a maximum of 20 files
+    associated with the tool.
+    """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+    chunk_overlap_tokens: Required[int]
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: Required[int]
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+    static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""
+
+
+ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+    ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
+]
+
+
+class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
+    chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
+    """The chunking strategy used to chunk the file(s).
+
+    If not set, will use the `auto` strategy.
+    """
+
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+    add to the vector store. There can be a maximum of 10000 files in a vector
+    store.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+    vector_store_ids: List[str]
+    """
+    The
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this assistant. There can be a maximum of 1 vector store attached to
+    the assistant.
+    """
+
+    vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
+    """
+    A helper to create a
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    with file_ids and attach it to this assistant. There can be a maximum of 1
+    vector store attached to the assistant.
+    """
+
+
+class ToolResources(TypedDict, total=False):
+    code_interpreter: ToolResourcesCodeInterpreter
+
+    file_search: ToolResourcesFileSearch
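These params are plain TypedDicts, so the nested tool-resource and chunking-strategy shapes above can be passed as literal dictionaries when creating an assistant (which is what the `vector_stores` helper is for). A minimal usage sketch, assuming a configured `OPENAI_API_KEY`; the model name and file IDs are placeholders:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Sketch: create an assistant whose file_search tool builds a vector store from
# two (placeholder) uploaded files, chunked with the "static" strategy above.
assistant = client.beta.assistants.create(
    model="gpt-4o",  # placeholder model ID
    name="Docs helper",
    tools=[{"type": "file_search"}],
    tool_resources={
        "file_search": {
            "vector_stores": [
                {
                    "file_ids": ["file-abc123", "file-def456"],  # placeholder file IDs
                    "chunking_strategy": {
                        "type": "static",
                        "static": {
                            "max_chunk_size_tokens": 800,
                            "chunk_overlap_tokens": 400,
                        },
                    },
                }
            ]
        }
    },
)
print(assistant.id)
```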
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py
new file mode 100644
index 00000000..3be40cd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["AssistantDeleted"]
+
+
+class AssistantDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["assistant.deleted"]
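`AssistantDeleted` is the payload the delete endpoint returns; a minimal sketch, with a placeholder assistant ID:

```python
from openai import OpenAI

client = OpenAI()

# Sketch: delete an assistant and inspect the AssistantDeleted fields.
deleted = client.beta.assistants.delete("asst_abc123")  # placeholder ID
print(deleted.id, deleted.deleted, deleted.object)
```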
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py
new file mode 100644
index 00000000..834ffbca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["AssistantListParams"]
+
+
+class AssistantListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py
new file mode 100644
index 00000000..6f06a344
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from ..shared.response_format_text import ResponseFormatText
+from ..shared.response_format_json_object import ResponseFormatJSONObject
+from ..shared.response_format_json_schema import ResponseFormatJSONSchema
+
+__all__ = ["AssistantResponseFormatOption"]
+
+AssistantResponseFormatOption: TypeAlias = Union[
+    Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py
new file mode 100644
index 00000000..5e724a4d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_response_format_option_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from ..shared_params.response_format_text import ResponseFormatText
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
+
+__all__ = ["AssistantResponseFormatOptionParam"]
+
+AssistantResponseFormatOptionParam: TypeAlias = Union[
+    Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
+]
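A sketch of passing one of these response-format shapes at assistant creation; the schema itself is a stand-in:

```python
from openai import OpenAI

client = OpenAI()

# Sketch: constrain replies to a JSON schema via Structured Outputs.
assistant = client.beta.assistants.create(
    model="gpt-4o",  # placeholder model ID
    instructions="Extract the requested fields and reply only with JSON.",
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "contact",  # placeholder schema name
            "schema": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "email": {"type": "string"},
                },
                "required": ["name", "email"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)
```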
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py
new file mode 100644
index 00000000..41d3a0c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_stream_event.py
@@ -0,0 +1,294 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .thread import Thread
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .threads.run import Run
+from .threads.message import Message
+from ..shared.error_object import ErrorObject
+from .threads.runs.run_step import RunStep
+from .threads.message_delta_event import MessageDeltaEvent
+from .threads.runs.run_step_delta_event import RunStepDeltaEvent
+
+__all__ = [
+    "AssistantStreamEvent",
+    "ThreadCreated",
+    "ThreadRunCreated",
+    "ThreadRunQueued",
+    "ThreadRunInProgress",
+    "ThreadRunRequiresAction",
+    "ThreadRunCompleted",
+    "ThreadRunIncomplete",
+    "ThreadRunFailed",
+    "ThreadRunCancelling",
+    "ThreadRunCancelled",
+    "ThreadRunExpired",
+    "ThreadRunStepCreated",
+    "ThreadRunStepInProgress",
+    "ThreadRunStepDelta",
+    "ThreadRunStepCompleted",
+    "ThreadRunStepFailed",
+    "ThreadRunStepCancelled",
+    "ThreadRunStepExpired",
+    "ThreadMessageCreated",
+    "ThreadMessageInProgress",
+    "ThreadMessageDelta",
+    "ThreadMessageCompleted",
+    "ThreadMessageIncomplete",
+    "ErrorEvent",
+]
+
+
+class ThreadCreated(BaseModel):
+    data: Thread
+    """
+    Represents a thread that contains
+    [messages](https://platform.openai.com/docs/api-reference/messages).
+    """
+
+    event: Literal["thread.created"]
+
+    enabled: Optional[bool] = None
+    """Whether to enable input audio transcription."""
+
+
+class ThreadRunCreated(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.created"]
+
+
+class ThreadRunQueued(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.queued"]
+
+
+class ThreadRunInProgress(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.in_progress"]
+
+
+class ThreadRunRequiresAction(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.requires_action"]
+
+
+class ThreadRunCompleted(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.completed"]
+
+
+class ThreadRunIncomplete(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.incomplete"]
+
+
+class ThreadRunFailed(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.failed"]
+
+
+class ThreadRunCancelling(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.cancelling"]
+
+
+class ThreadRunCancelled(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.cancelled"]
+
+
+class ThreadRunExpired(BaseModel):
+    data: Run
+    """
+    Represents an execution run on a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.run.expired"]
+
+
+class ThreadRunStepCreated(BaseModel):
+    data: RunStep
+    """Represents a step in execution of a run."""
+
+    event: Literal["thread.run.step.created"]
+
+
+class ThreadRunStepInProgress(BaseModel):
+    data: RunStep
+    """Represents a step in execution of a run."""
+
+    event: Literal["thread.run.step.in_progress"]
+
+
+class ThreadRunStepDelta(BaseModel):
+    data: RunStepDeltaEvent
+    """Represents a run step delta i.e.
+
+    any changed fields on a run step during streaming.
+    """
+
+    event: Literal["thread.run.step.delta"]
+
+
+class ThreadRunStepCompleted(BaseModel):
+    data: RunStep
+    """Represents a step in execution of a run."""
+
+    event: Literal["thread.run.step.completed"]
+
+
+class ThreadRunStepFailed(BaseModel):
+    data: RunStep
+    """Represents a step in execution of a run."""
+
+    event: Literal["thread.run.step.failed"]
+
+
+class ThreadRunStepCancelled(BaseModel):
+    data: RunStep
+    """Represents a step in execution of a run."""
+
+    event: Literal["thread.run.step.cancelled"]
+
+
+class ThreadRunStepExpired(BaseModel):
+    data: RunStep
+    """Represents a step in execution of a run."""
+
+    event: Literal["thread.run.step.expired"]
+
+
+class ThreadMessageCreated(BaseModel):
+    data: Message
+    """
+    Represents a message within a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.message.created"]
+
+
+class ThreadMessageInProgress(BaseModel):
+    data: Message
+    """
+    Represents a message within a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.message.in_progress"]
+
+
+class ThreadMessageDelta(BaseModel):
+    data: MessageDeltaEvent
+    """Represents a message delta i.e.
+
+    any changed fields on a message during streaming.
+    """
+
+    event: Literal["thread.message.delta"]
+
+
+class ThreadMessageCompleted(BaseModel):
+    data: Message
+    """
+    Represents a message within a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.message.completed"]
+
+
+class ThreadMessageIncomplete(BaseModel):
+    data: Message
+    """
+    Represents a message within a
+    [thread](https://platform.openai.com/docs/api-reference/threads).
+    """
+
+    event: Literal["thread.message.incomplete"]
+
+
+class ErrorEvent(BaseModel):
+    data: ErrorObject
+
+    event: Literal["error"]
+
+
+AssistantStreamEvent: TypeAlias = Annotated[
+    Union[
+        ThreadCreated,
+        ThreadRunCreated,
+        ThreadRunQueued,
+        ThreadRunInProgress,
+        ThreadRunRequiresAction,
+        ThreadRunCompleted,
+        ThreadRunIncomplete,
+        ThreadRunFailed,
+        ThreadRunCancelling,
+        ThreadRunCancelled,
+        ThreadRunExpired,
+        ThreadRunStepCreated,
+        ThreadRunStepInProgress,
+        ThreadRunStepDelta,
+        ThreadRunStepCompleted,
+        ThreadRunStepFailed,
+        ThreadRunStepCancelled,
+        ThreadRunStepExpired,
+        ThreadMessageCreated,
+        ThreadMessageInProgress,
+        ThreadMessageDelta,
+        ThreadMessageCompleted,
+        ThreadMessageIncomplete,
+        ErrorEvent,
+    ],
+    PropertyInfo(discriminator="event"),
+]
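Because the union is discriminated on `event`, a raw stream can be dispatched with plain string comparisons. A sketch using `stream=True` on run creation; the thread and assistant IDs are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Sketch: consume the raw run event stream; each item is one member of
# AssistantStreamEvent, identified by its `event` field.
stream = client.beta.threads.runs.create(
    thread_id="thread_abc123",   # placeholder ID
    assistant_id="asst_abc123",  # placeholder ID
    stream=True,
)
for event in stream:
    if event.event == "thread.message.delta":
        for part in event.data.delta.content or []:
            if part.type == "text" and part.text and part.text.value:
                print(part.text.value, end="", flush=True)
    elif event.event == "thread.run.completed":
        print()  # event.data is the final Run
```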
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py
new file mode 100644
index 00000000..1bde6858
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .function_tool import FunctionTool
+from .file_search_tool import FileSearchTool
+from .code_interpreter_tool import CodeInterpreterTool
+
+__all__ = ["AssistantTool"]
+
+AssistantTool: TypeAlias = Annotated[
+    Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py
new file mode 100644
index 00000000..d73439f0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .assistant_tool_choice_function import AssistantToolChoiceFunction
+
+__all__ = ["AssistantToolChoice"]
+
+
+class AssistantToolChoice(BaseModel):
+    type: Literal["function", "code_interpreter", "file_search"]
+    """The type of the tool. If type is `function`, the function name must be set"""
+
+    function: Optional[AssistantToolChoiceFunction] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py
new file mode 100644
index 00000000..0c896d80
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["AssistantToolChoiceFunction"]
+
+
+class AssistantToolChoiceFunction(BaseModel):
+    name: str
+    """The name of the function to call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py
new file mode 100644
index 00000000..428857de
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_function_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["AssistantToolChoiceFunctionParam"]
+
+
+class AssistantToolChoiceFunctionParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py
new file mode 100644
index 00000000..e57c3278
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .assistant_tool_choice import AssistantToolChoice
+
+__all__ = ["AssistantToolChoiceOption"]
+
+AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py
new file mode 100644
index 00000000..cc0053d3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_option_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .assistant_tool_choice_param import AssistantToolChoiceParam
+
+__all__ = ["AssistantToolChoiceOptionParam"]
+
+AssistantToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py
new file mode 100644
index 00000000..904f489e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_choice_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam
+
+__all__ = ["AssistantToolChoiceParam"]
+
+
+class AssistantToolChoiceParam(TypedDict, total=False):
+    type: Required[Literal["function", "code_interpreter", "file_search"]]
+    """The type of the tool. If type is `function`, the function name must be set"""
+
+    function: AssistantToolChoiceFunctionParam
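A sketch of the two tool-choice forms a run accepts: one of the literals from the option union, or an object that forces a particular tool; all IDs and names are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Sketch: force the next run to call file_search...
run = client.beta.threads.runs.create(
    thread_id="thread_abc123",   # placeholder ID
    assistant_id="asst_abc123",  # placeholder ID
    tool_choice={"type": "file_search"},
)

# ...or force a specific function by name; "none" / "auto" / "required" are the
# literal alternatives from AssistantToolChoiceOptionParam.
run = client.beta.threads.runs.create(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    tool_choice={"type": "function", "function": {"name": "get_weather"}},  # placeholder name
)
```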
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py
new file mode 100644
index 00000000..321c4b1d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_tool_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .function_tool_param import FunctionToolParam
+from .file_search_tool_param import FileSearchToolParam
+from .code_interpreter_tool_param import CodeInterpreterToolParam
+
+__all__ = ["AssistantToolParam"]
+
+AssistantToolParam: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py
new file mode 100644
index 00000000..d3ec7614
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/assistant_update_params.py
@@ -0,0 +1,171 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, TypedDict
+
+from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class AssistantUpdateParams(TypedDict, total=False):
+    description: Optional[str]
+    """The description of the assistant. The maximum length is 512 characters."""
+
+    instructions: Optional[str]
+    """The system instructions that the assistant uses.
+
+    The maximum length is 256,000 characters.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: Union[
+        str,
+        Literal[
+            "o3-mini",
+            "o3-mini-2025-01-31",
+            "o1",
+            "o1-2024-12-17",
+            "gpt-4o",
+            "gpt-4o-2024-11-20",
+            "gpt-4o-2024-08-06",
+            "gpt-4o-2024-05-13",
+            "gpt-4o-mini",
+            "gpt-4o-mini-2024-07-18",
+            "gpt-4.5-preview",
+            "gpt-4.5-preview-2025-02-27",
+            "gpt-4-turbo",
+            "gpt-4-turbo-2024-04-09",
+            "gpt-4-0125-preview",
+            "gpt-4-turbo-preview",
+            "gpt-4-1106-preview",
+            "gpt-4-vision-preview",
+            "gpt-4",
+            "gpt-4-0314",
+            "gpt-4-0613",
+            "gpt-4-32k",
+            "gpt-4-32k-0314",
+            "gpt-4-32k-0613",
+            "gpt-3.5-turbo",
+            "gpt-3.5-turbo-16k",
+            "gpt-3.5-turbo-0613",
+            "gpt-3.5-turbo-1106",
+            "gpt-3.5-turbo-0125",
+            "gpt-3.5-turbo-16k-0613",
+        ],
+    ]
+    """ID of the model to use.
+
+    You can use the
+    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+    see all of your available models, or see our
+    [Model overview](https://platform.openai.com/docs/models) for descriptions of
+    them.
+    """
+
+    name: Optional[str]
+    """The name of the assistant. The maximum length is 256 characters."""
+
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    response_format: Optional[AssistantResponseFormatOptionParam]
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_resources: Optional[ToolResources]
+    """A set of resources that are used by the assistant's tools.
+
+    The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+    tools: Iterable[AssistantToolParam]
+    """A list of tool enabled on the assistant.
+
+    There can be a maximum of 128 tools per assistant. Tools can be of types
+    `code_interpreter`, `file_search`, or `function`.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    Overrides the list of
+    [file](https://platform.openai.com/docs/api-reference/files) IDs made available
+    to the `code_interpreter` tool. There can be a maximum of 20 files associated
+    with the tool.
+    """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+    vector_store_ids: List[str]
+    """
+    Overrides the
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this assistant. There can be a maximum of 1 vector store attached to
+    the assistant.
+    """
+
+
+class ToolResources(TypedDict, total=False):
+    code_interpreter: ToolResourcesCodeInterpreter
+
+    file_search: ToolResourcesFileSearch
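A sketch of an update call that overrides the attached vector store and switches the model, matching the override semantics documented above; IDs are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Sketch: point the assistant's file_search tool at a different vector store
# and lower reasoning effort for the o-series model.
assistant = client.beta.assistants.update(
    "asst_abc123",  # placeholder assistant ID
    model="o3-mini",
    reasoning_effort="low",
    tool_resources={
        "file_search": {"vector_store_ids": ["vs_abc123"]},  # placeholder store ID
    },
)
```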
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/chat/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py
new file mode 100644
index 00000000..17ab3de6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["CodeInterpreterTool"]
+
+
+class CodeInterpreterTool(BaseModel):
+    type: Literal["code_interpreter"]
+    """The type of tool being defined: `code_interpreter`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py
new file mode 100644
index 00000000..4f6916d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/code_interpreter_tool_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["CodeInterpreterToolParam"]
+
+
+class CodeInterpreterToolParam(TypedDict, total=False):
+    type: Required[Literal["code_interpreter"]]
+    """The type of tool being defined: `code_interpreter`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py
new file mode 100644
index 00000000..89fc16c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileSearchTool", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(BaseModel):
+    score_threshold: float
+    """The score threshold for the file search.
+
+    All values must be a floating point number between 0 and 1.
+    """
+
+    ranker: Optional[Literal["auto", "default_2024_08_21"]] = None
+    """The ranker to use for the file search.
+
+    If not specified will use the `auto` ranker.
+    """
+
+
+class FileSearch(BaseModel):
+    max_num_results: Optional[int] = None
+    """The maximum number of results the file search tool should output.
+
+    The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+    should be between 1 and 50 inclusive.
+
+    Note that the file search tool may output fewer than `max_num_results` results.
+    See the
+    [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+    ranking_options: Optional[FileSearchRankingOptions] = None
+    """The ranking options for the file search.
+
+    If not specified, the file search tool will use the `auto` ranker and a
+    score_threshold of 0.
+
+    See the
+    [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+
+class FileSearchTool(BaseModel):
+    type: Literal["file_search"]
+    """The type of tool being defined: `file_search`"""
+
+    file_search: Optional[FileSearch] = None
+    """Overrides for the file search tool."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py
new file mode 100644
index 00000000..c73d0af7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/file_search_tool_param.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FileSearchToolParam", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(TypedDict, total=False):
+    score_threshold: Required[float]
+    """The score threshold for the file search.
+
+    All values must be a floating point number between 0 and 1.
+    """
+
+    ranker: Literal["auto", "default_2024_08_21"]
+    """The ranker to use for the file search.
+
+    If not specified will use the `auto` ranker.
+    """
+
+
+class FileSearch(TypedDict, total=False):
+    max_num_results: int
+    """The maximum number of results the file search tool should output.
+
+    The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+    should be between 1 and 50 inclusive.
+
+    Note that the file search tool may output fewer than `max_num_results` results.
+    See the
+    [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+    ranking_options: FileSearchRankingOptions
+    """The ranking options for the file search.
+
+    If not specified, the file search tool will use the `auto` ranker and a
+    score_threshold of 0.
+
+    See the
+    [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+
+class FileSearchToolParam(TypedDict, total=False):
+    type: Required[Literal["file_search"]]
+    """The type of tool being defined: `file_search`"""
+
+    file_search: FileSearch
+    """Overrides for the file search tool."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py
new file mode 100644
index 00000000..f9227678
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.function_definition import FunctionDefinition
+
+__all__ = ["FunctionTool"]
+
+
+class FunctionTool(BaseModel):
+    function: FunctionDefinition
+
+    type: Literal["function"]
+    """The type of tool being defined: `function`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py
new file mode 100644
index 00000000..d906e02b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/function_tool_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.function_definition import FunctionDefinition
+
+__all__ = ["FunctionToolParam"]
+
+
+class FunctionToolParam(TypedDict, total=False):
+    function: Required[FunctionDefinition]
+
+    type: Required[Literal["function"]]
+    """The type of tool being defined: `function`"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py
new file mode 100644
index 00000000..0374b9b4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/__init__.py
@@ -0,0 +1,96 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .session import Session as Session
+from .error_event import ErrorEvent as ErrorEvent
+from .conversation_item import ConversationItem as ConversationItem
+from .realtime_response import RealtimeResponse as RealtimeResponse
+from .response_done_event import ResponseDoneEvent as ResponseDoneEvent
+from .session_update_event import SessionUpdateEvent as SessionUpdateEvent
+from .realtime_client_event import RealtimeClientEvent as RealtimeClientEvent
+from .realtime_server_event import RealtimeServerEvent as RealtimeServerEvent
+from .response_cancel_event import ResponseCancelEvent as ResponseCancelEvent
+from .response_create_event import ResponseCreateEvent as ResponseCreateEvent
+from .session_create_params import SessionCreateParams as SessionCreateParams
+from .session_created_event import SessionCreatedEvent as SessionCreatedEvent
+from .session_updated_event import SessionUpdatedEvent as SessionUpdatedEvent
+from .transcription_session import TranscriptionSession as TranscriptionSession
+from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent
+from .conversation_item_param import ConversationItemParam as ConversationItemParam
+from .realtime_connect_params import RealtimeConnectParams as RealtimeConnectParams
+from .realtime_response_usage import RealtimeResponseUsage as RealtimeResponseUsage
+from .session_create_response import SessionCreateResponse as SessionCreateResponse
+from .realtime_response_status import RealtimeResponseStatus as RealtimeResponseStatus
+from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent
+from .conversation_item_content import ConversationItemContent as ConversationItemContent
+from .rate_limits_updated_event import RateLimitsUpdatedEvent as RateLimitsUpdatedEvent
+from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent
+from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent
+from .conversation_created_event import ConversationCreatedEvent as ConversationCreatedEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
+from .session_update_event_param import SessionUpdateEventParam as SessionUpdateEventParam
+from .realtime_client_event_param import RealtimeClientEventParam as RealtimeClientEventParam
+from .response_cancel_event_param import ResponseCancelEventParam as ResponseCancelEventParam
+from .response_create_event_param import ResponseCreateEventParam as ResponseCreateEventParam
+from .transcription_session_update import TranscriptionSessionUpdate as TranscriptionSessionUpdate
+from .conversation_item_create_event import ConversationItemCreateEvent as ConversationItemCreateEvent
+from .conversation_item_delete_event import ConversationItemDeleteEvent as ConversationItemDeleteEvent
+from .input_audio_buffer_clear_event import InputAudioBufferClearEvent as InputAudioBufferClearEvent
+from .conversation_item_content_param import ConversationItemContentParam as ConversationItemContentParam
+from .conversation_item_created_event import ConversationItemCreatedEvent as ConversationItemCreatedEvent
+from .conversation_item_deleted_event import ConversationItemDeletedEvent as ConversationItemDeletedEvent
+from .input_audio_buffer_append_event import InputAudioBufferAppendEvent as InputAudioBufferAppendEvent
+from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent as InputAudioBufferCommitEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
+from .conversation_item_retrieve_event import ConversationItemRetrieveEvent as ConversationItemRetrieveEvent
+from .conversation_item_truncate_event import ConversationItemTruncateEvent as ConversationItemTruncateEvent
+from .conversation_item_with_reference import ConversationItemWithReference as ConversationItemWithReference
+from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent as InputAudioBufferClearedEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
+from .conversation_item_truncated_event import ConversationItemTruncatedEvent as ConversationItemTruncatedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
+from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent
+from .transcription_session_update_param import TranscriptionSessionUpdateParam as TranscriptionSessionUpdateParam
+from .transcription_session_create_params import TranscriptionSessionCreateParams as TranscriptionSessionCreateParams
+from .transcription_session_updated_event import TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent
+from .conversation_item_create_event_param import ConversationItemCreateEventParam as ConversationItemCreateEventParam
+from .conversation_item_delete_event_param import ConversationItemDeleteEventParam as ConversationItemDeleteEventParam
+from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam as InputAudioBufferClearEventParam
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent
+from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam as InputAudioBufferAppendEventParam
+from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam as InputAudioBufferCommitEventParam
+from .response_audio_transcript_delta_event import (
+    ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
+)
+from .conversation_item_retrieve_event_param import (
+    ConversationItemRetrieveEventParam as ConversationItemRetrieveEventParam,
+)
+from .conversation_item_truncate_event_param import (
+    ConversationItemTruncateEventParam as ConversationItemTruncateEventParam,
+)
+from .conversation_item_with_reference_param import (
+    ConversationItemWithReferenceParam as ConversationItemWithReferenceParam,
+)
+from .input_audio_buffer_speech_started_event import (
+    InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent,
+)
+from .input_audio_buffer_speech_stopped_event import (
+    InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent,
+)
+from .response_function_call_arguments_done_event import (
+    ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
+)
+from .response_function_call_arguments_delta_event import (
+    ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
+)
+from .conversation_item_input_audio_transcription_delta_event import (
+    ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent,
+)
+from .conversation_item_input_audio_transcription_failed_event import (
+    ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent,
+)
+from .conversation_item_input_audio_transcription_completed_event import (
+    ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py
new file mode 100644
index 00000000..4ba05408
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_created_event.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationCreatedEvent", "Conversation"]
+
+
+class Conversation(BaseModel):
+    id: Optional[str] = None
+    """The unique ID of the conversation."""
+
+    object: Optional[Literal["realtime.conversation"]] = None
+    """The object type, must be `realtime.conversation`."""
+
+
+class ConversationCreatedEvent(BaseModel):
+    conversation: Conversation
+    """The conversation resource."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    type: Literal["conversation.created"]
+    """The event type, must be `conversation.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py
new file mode 100644
index 00000000..4edf6c4d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item_content import ConversationItemContent
+
+__all__ = ["ConversationItem"]
+
+
+class ConversationItem(BaseModel):
+    id: Optional[str] = None
+    """
+    The unique ID of the item, this can be generated by the client to help manage
+    server-side context, but is not required because the server will generate one if
+    not provided.
+    """
+
+    arguments: Optional[str] = None
+    """The arguments of the function call (for `function_call` items)."""
+
+    call_id: Optional[str] = None
+    """
+    The ID of the function call (for `function_call` and `function_call_output`
+    items). If passed on a `function_call_output` item, the server will check that a
+    `function_call` item with the same ID exists in the conversation history.
+    """
+
+    content: Optional[List[ConversationItemContent]] = None
+    """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content
+    - Message items of role `user` support `input_text` and `input_audio` content
+    - Message items of role `assistant` support `text` content.
+    """
+
+    name: Optional[str] = None
+    """The name of the function being called (for `function_call` items)."""
+
+    object: Optional[Literal["realtime.item"]] = None
+    """Identifier for the API object being returned - always `realtime.item`."""
+
+    output: Optional[str] = None
+    """The output of the function call (for `function_call_output` items)."""
+
+    role: Optional[Literal["user", "assistant", "system"]] = None
+    """
+    The role of the message sender (`user`, `assistant`, `system`), only applicable
+    for `message` items.
+    """
+
+    status: Optional[Literal["completed", "incomplete"]] = None
+    """The status of the item (`completed`, `incomplete`).
+
+    These have no effect on the conversation, but are accepted for consistency with
+    the `conversation.item.created` event.
+    """
+
+    type: Optional[Literal["message", "function_call", "function_call_output"]] = None
+    """The type of the item (`message`, `function_call`, `function_call_output`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py
new file mode 100644
index 00000000..ab40a4a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemContent"]
+
+
+class ConversationItemContent(BaseModel):
+    id: Optional[str] = None
+    """
+    ID of a previous conversation item to reference (for `item_reference` content
+    types in `response.create` events). These can reference both client and server
+    created items.
+    """
+
+    audio: Optional[str] = None
+    """Base64-encoded audio bytes, used for `input_audio` content type."""
+
+    text: Optional[str] = None
+    """The text content, used for `input_text` and `text` content types."""
+
+    transcript: Optional[str] = None
+    """The transcript of the audio, used for `input_audio` content type."""
+
+    type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None
+    """The content type (`input_text`, `input_audio`, `item_reference`, `text`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py
new file mode 100644
index 00000000..7a3a92a3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_content_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ConversationItemContentParam"]
+
+
+class ConversationItemContentParam(TypedDict, total=False):
+    id: str
+    """
+    ID of a previous conversation item to reference (for `item_reference` content
+    types in `response.create` events). These can reference both client and server
+    created items.
+    """
+
+    audio: str
+    """Base64-encoded audio bytes, used for `input_audio` content type."""
+
+    text: str
+    """The text content, used for `input_text` and `text` content types."""
+
+    transcript: str
+    """The transcript of the audio, used for `input_audio` content type."""
+
+    type: Literal["input_text", "input_audio", "item_reference", "text"]
+    """The content type (`input_text`, `input_audio`, `item_reference`, `text`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py
new file mode 100644
index 00000000..f19d552a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ConversationItemCreateEvent"]
+
+
+class ConversationItemCreateEvent(BaseModel):
+    item: ConversationItem
+    """The item to add to the conversation."""
+
+    type: Literal["conversation.item.create"]
+    """The event type, must be `conversation.item.create`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
+
+    previous_item_id: Optional[str] = None
+    """The ID of the preceding item after which the new item will be inserted.
+
+    If not set, the new item will be appended to the end of the conversation. If set
+    to `root`, the new item will be added to the beginning of the conversation. If
+    set to an existing ID, it allows an item to be inserted mid-conversation. If the
+    ID cannot be found, an error will be returned and the item will not be added.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py
new file mode 100644
index 00000000..693d0fd5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_create_event_param.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .conversation_item_param import ConversationItemParam
+
+__all__ = ["ConversationItemCreateEventParam"]
+
+
+class ConversationItemCreateEventParam(TypedDict, total=False):
+    item: Required[ConversationItemParam]
+    """The item to add to the conversation."""
+
+    type: Required[Literal["conversation.item.create"]]
+    """The event type, must be `conversation.item.create`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
+
+    previous_item_id: str
+    """The ID of the preceding item after which the new item will be inserted.
+
+    If not set, the new item will be appended to the end of the conversation. If set
+    to `root`, the new item will be added to the beginning of the conversation. If
+    set to an existing ID, it allows an item to be inserted mid-conversation. If the
+    ID cannot be found, an error will be returned and the item will not be added.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py
new file mode 100644
index 00000000..2f203882
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_created_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ConversationItemCreatedEvent"]
+
+
+class ConversationItemCreatedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item: ConversationItem
+    """The item to add to the conversation."""
+
+    previous_item_id: str
+    """
+    The ID of the preceding item in the Conversation context, allows the client to
+    understand the order of the conversation.
+    """
+
+    type: Literal["conversation.item.created"]
+    """The event type, must be `conversation.item.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py
new file mode 100644
index 00000000..02ca8250
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemDeleteEvent"]
+
+
+class ConversationItemDeleteEvent(BaseModel):
+    item_id: str
+    """The ID of the item to delete."""
+
+    type: Literal["conversation.item.delete"]
+    """The event type, must be `conversation.item.delete`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py
new file mode 100644
index 00000000..c3f88d66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_delete_event_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConversationItemDeleteEventParam"]
+
+
+class ConversationItemDeleteEventParam(TypedDict, total=False):
+    item_id: Required[str]
+    """The ID of the item to delete."""
+
+    type: Required[Literal["conversation.item.delete"]]
+    """The event type, must be `conversation.item.delete`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py
new file mode 100644
index 00000000..a35a9781
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_deleted_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemDeletedEvent"]
+
+
+class ConversationItemDeletedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item that was deleted."""
+
+    type: Literal["conversation.item.deleted"]
+    """The event type, must be `conversation.item.deleted`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py
new file mode 100644
index 00000000..46981169
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+    token: str
+    """The token that was used to generate the log probability."""
+
+    bytes: List[int]
+    """The bytes that were used to generate the log probability."""
+
+    logprob: float
+    """The log probability of the token."""
+
+
+class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel):
+    content_index: int
+    """The index of the content part containing the audio."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the user message item containing the audio."""
+
+    transcript: str
+    """The transcribed text."""
+
+    type: Literal["conversation.item.input_audio_transcription.completed"]
+    """
+    The event type, must be `conversation.item.input_audio_transcription.completed`.
+    """
+
+    logprobs: Optional[List[Logprob]] = None
+    """The log probabilities of the transcription."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py
new file mode 100644
index 00000000..924d06d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemInputAudioTranscriptionDeltaEvent", "Logprob"]
+
+
+class Logprob(BaseModel):
+    token: str
+    """The token that was used to generate the log probability."""
+
+    bytes: List[int]
+    """The bytes that were used to generate the log probability."""
+
+    logprob: float
+    """The log probability of the token."""
+
+
+class ConversationItemInputAudioTranscriptionDeltaEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    type: Literal["conversation.item.input_audio_transcription.delta"]
+    """The event type, must be `conversation.item.input_audio_transcription.delta`."""
+
+    content_index: Optional[int] = None
+    """The index of the content part in the item's content array."""
+
+    delta: Optional[str] = None
+    """The text delta."""
+
+    logprobs: Optional[List[Logprob]] = None
+    """The log probabilities of the transcription."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py
new file mode 100644
index 00000000..cecac93e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemInputAudioTranscriptionFailedEvent", "Error"]
+
+
+class Error(BaseModel):
+    code: Optional[str] = None
+    """Error code, if any."""
+
+    message: Optional[str] = None
+    """A human-readable error message."""
+
+    param: Optional[str] = None
+    """Parameter related to the error, if any."""
+
+    type: Optional[str] = None
+    """The type of error."""
+
+
+class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel):
+    content_index: int
+    """The index of the content part containing the audio."""
+
+    error: Error
+    """Details of the transcription error."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the user message item."""
+
+    type: Literal["conversation.item.input_audio_transcription.failed"]
+    """The event type, must be `conversation.item.input_audio_transcription.failed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py
new file mode 100644
index 00000000..ac0f8431
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_param.py
@@ -0,0 +1,62 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, TypedDict
+
+from .conversation_item_content_param import ConversationItemContentParam
+
+__all__ = ["ConversationItemParam"]
+
+
+class ConversationItemParam(TypedDict, total=False):
+    id: str
+    """
+    The unique ID of the item, this can be generated by the client to help manage
+    server-side context, but is not required because the server will generate one if
+    not provided.
+    """
+
+    arguments: str
+    """The arguments of the function call (for `function_call` items)."""
+
+    call_id: str
+    """
+    The ID of the function call (for `function_call` and `function_call_output`
+    items). If passed on a `function_call_output` item, the server will check that a
+    `function_call` item with the same ID exists in the conversation history.
+    """
+
+    content: Iterable[ConversationItemContentParam]
+    """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content
+    - Message items of role `user` support `input_text` and `input_audio` content
+    - Message items of role `assistant` support `text` content.
+    """
+
+    name: str
+    """The name of the function being called (for `function_call` items)."""
+
+    object: Literal["realtime.item"]
+    """Identifier for the API object being returned - always `realtime.item`."""
+
+    output: str
+    """The output of the function call (for `function_call_output` items)."""
+
+    role: Literal["user", "assistant", "system"]
+    """
+    The role of the message sender (`user`, `assistant`, `system`), only applicable
+    for `message` items.
+    """
+
+    status: Literal["completed", "incomplete"]
+    """The status of the item (`completed`, `incomplete`).
+
+    These have no effect on the conversation, but are accepted for consistency with
+    the `conversation.item.created` event.
+    """
+
+    type: Literal["message", "function_call", "function_call_output"]
+    """The type of the item (`message`, `function_call`, `function_call_output`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py
new file mode 100644
index 00000000..82238605
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemRetrieveEvent"]
+
+
+class ConversationItemRetrieveEvent(BaseModel):
+    item_id: str
+    """The ID of the item to retrieve."""
+
+    type: Literal["conversation.item.retrieve"]
+    """The event type, must be `conversation.item.retrieve`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py
new file mode 100644
index 00000000..71b3ffa4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_retrieve_event_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConversationItemRetrieveEventParam"]
+
+
+class ConversationItemRetrieveEventParam(TypedDict, total=False):
+    item_id: Required[str]
+    """The ID of the item to retrieve."""
+
+    type: Required[Literal["conversation.item.retrieve"]]
+    """The event type, must be `conversation.item.retrieve`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py
new file mode 100644
index 00000000..cb336bba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemTruncateEvent"]
+
+
+class ConversationItemTruncateEvent(BaseModel):
+    audio_end_ms: int
+    """Inclusive duration up to which audio is truncated, in milliseconds.
+
+    If the audio_end_ms is greater than the actual audio duration, the server will
+    respond with an error.
+    """
+
+    content_index: int
+    """The index of the content part to truncate. Set this to 0."""
+
+    item_id: str
+    """The ID of the assistant message item to truncate.
+
+    Only assistant message items can be truncated.
+    """
+
+    type: Literal["conversation.item.truncate"]
+    """The event type, must be `conversation.item.truncate`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py
new file mode 100644
index 00000000..d3ad1e1e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncate_event_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConversationItemTruncateEventParam"]
+
+
+class ConversationItemTruncateEventParam(TypedDict, total=False):
+    audio_end_ms: Required[int]
+    """Inclusive duration up to which audio is truncated, in milliseconds.
+
+    If the audio_end_ms is greater than the actual audio duration, the server will
+    respond with an error.
+    """
+
+    content_index: Required[int]
+    """The index of the content part to truncate. Set this to 0."""
+
+    item_id: Required[str]
+    """The ID of the assistant message item to truncate.
+
+    Only assistant message items can be truncated.
+    """
+
+    type: Required[Literal["conversation.item.truncate"]]
+    """The event type, must be `conversation.item.truncate`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py
new file mode 100644
index 00000000..36368fa2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_truncated_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConversationItemTruncatedEvent"]
+
+
+class ConversationItemTruncatedEvent(BaseModel):
+    audio_end_ms: int
+    """The duration up to which the audio was truncated, in milliseconds."""
+
+    content_index: int
+    """The index of the content part that was truncated."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the assistant message item that was truncated."""
+
+    type: Literal["conversation.item.truncated"]
+    """The event type, must be `conversation.item.truncated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py
new file mode 100644
index 00000000..31806afc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item_content import ConversationItemContent
+
+__all__ = ["ConversationItemWithReference"]
+
+
+class ConversationItemWithReference(BaseModel):
+    id: Optional[str] = None
+    """
+    For an item of type (`message` | `function_call` | `function_call_output`) this
+    field allows the client to assign the unique ID of the item. It is not required
+    because the server will generate one if not provided.
+
+    For an item of type `item_reference`, this field is required and is a reference
+    to any item that has previously existed in the conversation.
+    """
+
+    arguments: Optional[str] = None
+    """The arguments of the function call (for `function_call` items)."""
+
+    call_id: Optional[str] = None
+    """
+    The ID of the function call (for `function_call` and `function_call_output`
+    items). If passed on a `function_call_output` item, the server will check that a
+    `function_call` item with the same ID exists in the conversation history.
+    """
+
+    content: Optional[List[ConversationItemContent]] = None
+    """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content
+    - Message items of role `user` support `input_text` and `input_audio` content
+    - Message items of role `assistant` support `text` content.
+    """
+
+    name: Optional[str] = None
+    """The name of the function being called (for `function_call` items)."""
+
+    object: Optional[Literal["realtime.item"]] = None
+    """Identifier for the API object being returned - always `realtime.item`."""
+
+    output: Optional[str] = None
+    """The output of the function call (for `function_call_output` items)."""
+
+    role: Optional[Literal["user", "assistant", "system"]] = None
+    """
+    The role of the message sender (`user`, `assistant`, `system`), only applicable
+    for `message` items.
+    """
+
+    status: Optional[Literal["completed", "incomplete"]] = None
+    """The status of the item (`completed`, `incomplete`).
+
+    These have no effect on the conversation, but are accepted for consistency with
+    the `conversation.item.created` event.
+    """
+
+    type: Optional[Literal["message", "function_call", "function_call_output", "item_reference"]] = None
+    """
+    The type of the item (`message`, `function_call`, `function_call_output`,
+    `item_reference`).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py
new file mode 100644
index 00000000..e266cdce
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/conversation_item_with_reference_param.py
@@ -0,0 +1,68 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, TypedDict
+
+from .conversation_item_content_param import ConversationItemContentParam
+
+__all__ = ["ConversationItemWithReferenceParam"]
+
+
+class ConversationItemWithReferenceParam(TypedDict, total=False):
+    id: str
+    """
+    For an item of type (`message` | `function_call` | `function_call_output`) this
+    field allows the client to assign the unique ID of the item. It is not required
+    because the server will generate one if not provided.
+
+    For an item of type `item_reference`, this field is required and is a reference
+    to any item that has previously existed in the conversation.
+    """
+
+    arguments: str
+    """The arguments of the function call (for `function_call` items)."""
+
+    call_id: str
+    """
+    The ID of the function call (for `function_call` and `function_call_output`
+    items). If passed on a `function_call_output` item, the server will check that a
+    `function_call` item with the same ID exists in the conversation history.
+    """
+
+    content: Iterable[ConversationItemContentParam]
+    """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content
+    - Message items of role `user` support `input_text` and `input_audio` content
+    - Message items of role `assistant` support `text` content.
+    """
+
+    name: str
+    """The name of the function being called (for `function_call` items)."""
+
+    object: Literal["realtime.item"]
+    """Identifier for the API object being returned - always `realtime.item`."""
+
+    output: str
+    """The output of the function call (for `function_call_output` items)."""
+
+    role: Literal["user", "assistant", "system"]
+    """
+    The role of the message sender (`user`, `assistant`, `system`), only applicable
+    for `message` items.
+    """
+
+    status: Literal["completed", "incomplete"]
+    """The status of the item (`completed`, `incomplete`).
+
+    These have no effect on the conversation, but are accepted for consistency with
+    the `conversation.item.created` event.
+    """
+
+    type: Literal["message", "function_call", "function_call_output", "item_reference"]
+    """
+    The type of the item (`message`, `function_call`, `function_call_output`,
+    `item_reference`).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py
new file mode 100644
index 00000000..e020fc38
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/error_event.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ErrorEvent", "Error"]
+
+
+class Error(BaseModel):
+    message: str
+    """A human-readable error message."""
+
+    type: str
+    """The type of error (e.g., "invalid_request_error", "server_error")."""
+
+    code: Optional[str] = None
+    """Error code, if any."""
+
+    event_id: Optional[str] = None
+    """The event_id of the client event that caused the error, if applicable."""
+
+    param: Optional[str] = None
+    """Parameter related to the error, if any."""
+
+
+class ErrorEvent(BaseModel):
+    error: Error
+    """Details of the error."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    type: Literal["error"]
+    """The event type, must be `error`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py
new file mode 100644
index 00000000..a253a648
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferAppendEvent"]
+
+
+class InputAudioBufferAppendEvent(BaseModel):
+    audio: str
+    """Base64-encoded audio bytes.
+
+    This must be in the format specified by the `input_audio_format` field in the
+    session configuration.
+    """
+
+    type: Literal["input_audio_buffer.append"]
+    """The event type, must be `input_audio_buffer.append`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py
new file mode 100644
index 00000000..3ad0bc73
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_append_event_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InputAudioBufferAppendEventParam"]
+
+
+class InputAudioBufferAppendEventParam(TypedDict, total=False):
+    audio: Required[str]
+    """Base64-encoded audio bytes.
+
+    This must be in the format specified by the `input_audio_format` field in the
+    session configuration.
+    """
+
+    type: Required[Literal["input_audio_buffer.append"]]
+    """The event type, must be `input_audio_buffer.append`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py
new file mode 100644
index 00000000..b0624d34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferClearEvent"]
+
+
+class InputAudioBufferClearEvent(BaseModel):
+    type: Literal["input_audio_buffer.clear"]
+    """The event type, must be `input_audio_buffer.clear`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py
new file mode 100644
index 00000000..2bd6bc5a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InputAudioBufferClearEventParam"]
+
+
+class InputAudioBufferClearEventParam(TypedDict, total=False):
+    type: Required[Literal["input_audio_buffer.clear"]]
+    """The event type, must be `input_audio_buffer.clear`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py
new file mode 100644
index 00000000..632e1b94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_cleared_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferClearedEvent"]
+
+
+class InputAudioBufferClearedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    type: Literal["input_audio_buffer.cleared"]
+    """The event type, must be `input_audio_buffer.cleared`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py
new file mode 100644
index 00000000..7b6f5e46
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferCommitEvent"]
+
+
+class InputAudioBufferCommitEvent(BaseModel):
+    type: Literal["input_audio_buffer.commit"]
+    """The event type, must be `input_audio_buffer.commit`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py
new file mode 100644
index 00000000..c9c927ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InputAudioBufferCommitEventParam"]
+
+
+class InputAudioBufferCommitEventParam(TypedDict, total=False):
+    type: Required[Literal["input_audio_buffer.commit"]]
+    """The event type, must be `input_audio_buffer.commit`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py
new file mode 100644
index 00000000..3071eff3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_committed_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferCommittedEvent"]
+
+
+class InputAudioBufferCommittedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the user message item that will be created."""
+
+    previous_item_id: str
+    """The ID of the preceding item after which the new item will be inserted."""
+
+    type: Literal["input_audio_buffer.committed"]
+    """The event type, must be `input_audio_buffer.committed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py
new file mode 100644
index 00000000..4f3ab082
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferSpeechStartedEvent"]
+
+
+class InputAudioBufferSpeechStartedEvent(BaseModel):
+    audio_start_ms: int
+    """
+    Milliseconds from the start of all audio written to the buffer during the
+    session when speech was first detected. This will correspond to the beginning of
+    audio sent to the model, and thus includes the `prefix_padding_ms` configured in
+    the Session.
+    """
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the user message item that will be created when speech stops."""
+
+    type: Literal["input_audio_buffer.speech_started"]
+    """The event type, must be `input_audio_buffer.speech_started`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py
new file mode 100644
index 00000000..40568170
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputAudioBufferSpeechStoppedEvent"]
+
+
+class InputAudioBufferSpeechStoppedEvent(BaseModel):
+    audio_end_ms: int
+    """Milliseconds since the session started when speech stopped.
+
+    This will correspond to the end of audio sent to the model, and thus includes
+    the `min_silence_duration_ms` configured in the Session.
+    """
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the user message item that will be created."""
+
+    type: Literal["input_audio_buffer.speech_stopped"]
+    """The event type, must be `input_audio_buffer.speech_stopped`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py
new file mode 100644
index 00000000..7e12283c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/rate_limits_updated_event.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RateLimitsUpdatedEvent", "RateLimit"]
+
+
+class RateLimit(BaseModel):
+    limit: Optional[int] = None
+    """The maximum allowed value for the rate limit."""
+
+    name: Optional[Literal["requests", "tokens"]] = None
+    """The name of the rate limit (`requests`, `tokens`)."""
+
+    remaining: Optional[int] = None
+    """The remaining value before the limit is reached."""
+
+    reset_seconds: Optional[float] = None
+    """Seconds until the rate limit resets."""
+
+
+class RateLimitsUpdatedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    rate_limits: List[RateLimit]
+    """List of rate limit information."""
+
+    type: Literal["rate_limits.updated"]
+    """The event type, must be `rate_limits.updated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py
new file mode 100644
index 00000000..f962a505
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .session_update_event import SessionUpdateEvent
+from .response_cancel_event import ResponseCancelEvent
+from .response_create_event import ResponseCreateEvent
+from .transcription_session_update import TranscriptionSessionUpdate
+from .conversation_item_create_event import ConversationItemCreateEvent
+from .conversation_item_delete_event import ConversationItemDeleteEvent
+from .input_audio_buffer_clear_event import InputAudioBufferClearEvent
+from .input_audio_buffer_append_event import InputAudioBufferAppendEvent
+from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent
+from .conversation_item_retrieve_event import ConversationItemRetrieveEvent
+from .conversation_item_truncate_event import ConversationItemTruncateEvent
+
+__all__ = ["RealtimeClientEvent"]
+
+RealtimeClientEvent: TypeAlias = Annotated[
+    Union[
+        ConversationItemCreateEvent,
+        ConversationItemDeleteEvent,
+        ConversationItemRetrieveEvent,
+        ConversationItemTruncateEvent,
+        InputAudioBufferAppendEvent,
+        InputAudioBufferClearEvent,
+        InputAudioBufferCommitEvent,
+        ResponseCancelEvent,
+        ResponseCreateEvent,
+        SessionUpdateEvent,
+        TranscriptionSessionUpdate,
+    ],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py
new file mode 100644
index 00000000..6fdba4b8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_client_event_param.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .session_update_event_param import SessionUpdateEventParam
+from .response_cancel_event_param import ResponseCancelEventParam
+from .response_create_event_param import ResponseCreateEventParam
+from .transcription_session_update_param import TranscriptionSessionUpdateParam
+from .conversation_item_create_event_param import ConversationItemCreateEventParam
+from .conversation_item_delete_event_param import ConversationItemDeleteEventParam
+from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam
+from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam
+from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam
+from .conversation_item_retrieve_event_param import ConversationItemRetrieveEventParam
+from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam
+
+__all__ = ["RealtimeClientEventParam"]
+
+RealtimeClientEventParam: TypeAlias = Union[
+    ConversationItemCreateEventParam,
+    ConversationItemDeleteEventParam,
+    ConversationItemRetrieveEventParam,
+    ConversationItemTruncateEventParam,
+    InputAudioBufferAppendEventParam,
+    InputAudioBufferClearEventParam,
+    InputAudioBufferCommitEventParam,
+    ResponseCancelEventParam,
+    ResponseCreateEventParam,
+    SessionUpdateEventParam,
+    TranscriptionSessionUpdateParam,
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py
new file mode 100644
index 00000000..76474f3d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_connect_params.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RealtimeConnectParams"]
+
+
+class RealtimeConnectParams(TypedDict, total=False):
+    model: Required[str]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py
new file mode 100644
index 00000000..4c3c83d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response.py
@@ -0,0 +1,87 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.metadata import Metadata
+from .conversation_item import ConversationItem
+from .realtime_response_usage import RealtimeResponseUsage
+from .realtime_response_status import RealtimeResponseStatus
+
+__all__ = ["RealtimeResponse"]
+
+
+class RealtimeResponse(BaseModel):
+    id: Optional[str] = None
+    """The unique ID of the response."""
+
+    conversation_id: Optional[str] = None
+    """
+    Which conversation the response is added to, determined by the `conversation`
+    field in the `response.create` event. If `auto`, the response will be added to
+    the default conversation and the value of `conversation_id` will be an id like
+    `conv_1234`. If `none`, the response will not be added to any conversation and
+    the value of `conversation_id` will be `null`. If responses are being triggered
+    by server VAD, the response will be added to the default conversation, thus the
+    `conversation_id` will be an id like `conv_1234`.
+    """
+
+    max_output_tokens: Union[int, Literal["inf"], None] = None
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls, that was used in this response.
+    """
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model used to respond.
+
+    If there are multiple modalities, the model will pick one; for example, if
+    `modalities` is `["text", "audio"]`, the model could be responding in either
+    text or audio.
+    """
+
+    object: Optional[Literal["realtime.response"]] = None
+    """The object type, must be `realtime.response`."""
+
+    output: Optional[List[ConversationItem]] = None
+    """The list of output items generated by the response."""
+
+    output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+    """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+    status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None
+    """
+    The final status of the response (`completed`, `cancelled`, `failed`, or
+    `incomplete`).
+    """
+
+    status_details: Optional[RealtimeResponseStatus] = None
+    """Additional details about the status."""
+
+    temperature: Optional[float] = None
+    """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+    usage: Optional[RealtimeResponseUsage] = None
+    """Usage statistics for the Response, this will correspond to billing.
+
+    A Realtime API session will maintain a conversation context and append new Items
+    to the Conversation, thus output from previous turns (text and audio tokens)
+    will become the input for later turns.
+    """
+
+    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    """
+    The voice the model used to respond. Current voice options are `alloy`, `ash`,
+    `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py
new file mode 100644
index 00000000..7189cd58
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_status.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RealtimeResponseStatus", "Error"]
+
+
+class Error(BaseModel):
+    code: Optional[str] = None
+    """Error code, if any."""
+
+    type: Optional[str] = None
+    """The type of error."""
+
+
+class RealtimeResponseStatus(BaseModel):
+    error: Optional[Error] = None
+    """
+    A description of the error that caused the response to fail, populated when the
+    `status` is `failed`.
+    """
+
+    reason: Optional[Literal["turn_detected", "client_cancelled", "max_output_tokens", "content_filter"]] = None
+    """The reason the Response did not complete.
+
+    For a `cancelled` Response, one of `turn_detected` (the server VAD detected a
+    new start of speech) or `client_cancelled` (the client sent a cancel event). For
+    an `incomplete` Response, one of `max_output_tokens` or `content_filter` (the
+    server-side safety filter activated and cut off the response).
+    """
+
+    type: Optional[Literal["completed", "cancelled", "incomplete", "failed"]] = None
+    """
+    The type of error that caused the response to fail, corresponding with the
+    `status` field (`completed`, `cancelled`, `incomplete`, `failed`).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py
new file mode 100644
index 00000000..7ca822e2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_response_usage.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["RealtimeResponseUsage", "InputTokenDetails", "OutputTokenDetails"]
+
+
+class InputTokenDetails(BaseModel):
+    audio_tokens: Optional[int] = None
+    """The number of audio tokens used in the Response."""
+
+    cached_tokens: Optional[int] = None
+    """The number of cached tokens used in the Response."""
+
+    text_tokens: Optional[int] = None
+    """The number of text tokens used in the Response."""
+
+
+class OutputTokenDetails(BaseModel):
+    audio_tokens: Optional[int] = None
+    """The number of audio tokens used in the Response."""
+
+    text_tokens: Optional[int] = None
+    """The number of text tokens used in the Response."""
+
+
+class RealtimeResponseUsage(BaseModel):
+    input_token_details: Optional[InputTokenDetails] = None
+    """Details about the input tokens used in the Response."""
+
+    input_tokens: Optional[int] = None
+    """
+    The number of input tokens used in the Response, including text and audio
+    tokens.
+    """
+
+    output_token_details: Optional[OutputTokenDetails] = None
+    """Details about the output tokens used in the Response."""
+
+    output_tokens: Optional[int] = None
+    """
+    The number of output tokens sent in the Response, including text and audio
+    tokens.
+    """
+
+    total_tokens: Optional[int] = None
+    """
+    The total number of tokens in the Response including input and output text and
+    audio tokens.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py
new file mode 100644
index 00000000..ba1d3244
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/realtime_server_event.py
@@ -0,0 +1,91 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+from .error_event import ErrorEvent
+from .conversation_item import ConversationItem
+from .response_done_event import ResponseDoneEvent
+from .session_created_event import SessionCreatedEvent
+from .session_updated_event import SessionUpdatedEvent
+from .response_created_event import ResponseCreatedEvent
+from .response_text_done_event import ResponseTextDoneEvent
+from .rate_limits_updated_event import RateLimitsUpdatedEvent
+from .response_audio_done_event import ResponseAudioDoneEvent
+from .response_text_delta_event import ResponseTextDeltaEvent
+from .conversation_created_event import ConversationCreatedEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent
+from .conversation_item_created_event import ConversationItemCreatedEvent
+from .conversation_item_deleted_event import ConversationItemDeletedEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent
+from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent
+from .conversation_item_truncated_event import ConversationItemTruncatedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent
+from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent
+from .transcription_session_updated_event import TranscriptionSessionUpdatedEvent
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
+from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
+from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent
+from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent
+from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
+from .conversation_item_input_audio_transcription_delta_event import ConversationItemInputAudioTranscriptionDeltaEvent
+from .conversation_item_input_audio_transcription_failed_event import ConversationItemInputAudioTranscriptionFailedEvent
+from .conversation_item_input_audio_transcription_completed_event import (
+    ConversationItemInputAudioTranscriptionCompletedEvent,
+)
+
+__all__ = ["RealtimeServerEvent", "ConversationItemRetrieved"]
+
+
+class ConversationItemRetrieved(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item: ConversationItem
+    """The item to add to the conversation."""
+
+    type: Literal["conversation.item.retrieved"]
+    """The event type, must be `conversation.item.retrieved`."""
+
+
+RealtimeServerEvent: TypeAlias = Annotated[
+    Union[
+        ConversationCreatedEvent,
+        ConversationItemCreatedEvent,
+        ConversationItemDeletedEvent,
+        ConversationItemInputAudioTranscriptionCompletedEvent,
+        ConversationItemInputAudioTranscriptionDeltaEvent,
+        ConversationItemInputAudioTranscriptionFailedEvent,
+        ConversationItemRetrieved,
+        ConversationItemTruncatedEvent,
+        ErrorEvent,
+        InputAudioBufferClearedEvent,
+        InputAudioBufferCommittedEvent,
+        InputAudioBufferSpeechStartedEvent,
+        InputAudioBufferSpeechStoppedEvent,
+        RateLimitsUpdatedEvent,
+        ResponseAudioDeltaEvent,
+        ResponseAudioDoneEvent,
+        ResponseAudioTranscriptDeltaEvent,
+        ResponseAudioTranscriptDoneEvent,
+        ResponseContentPartAddedEvent,
+        ResponseContentPartDoneEvent,
+        ResponseCreatedEvent,
+        ResponseDoneEvent,
+        ResponseFunctionCallArgumentsDeltaEvent,
+        ResponseFunctionCallArgumentsDoneEvent,
+        ResponseOutputItemAddedEvent,
+        ResponseOutputItemDoneEvent,
+        ResponseTextDeltaEvent,
+        ResponseTextDoneEvent,
+        SessionCreatedEvent,
+        SessionUpdatedEvent,
+        TranscriptionSessionUpdatedEvent,
+    ],
+    PropertyInfo(discriminator="type"),
+]
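A minimal dispatch sketch over the discriminated `RealtimeServerEvent` union, assuming `event` has already been parsed into one of the member models; only a couple of hypothetical handlers are shown.

```python
from openai.types.beta.realtime.error_event import ErrorEvent
from openai.types.beta.realtime.realtime_server_event import RealtimeServerEvent
from openai.types.beta.realtime.response_text_delta_event import ResponseTextDeltaEvent


def handle_server_event(event: RealtimeServerEvent) -> None:
    # Every member of the union carries a literal `type` discriminator,
    # so either isinstance checks or matching on `event.type` works.
    if isinstance(event, ResponseTextDeltaEvent):
        print(event.delta, end="", flush=True)  # stream text as it arrives
    elif isinstance(event, ErrorEvent):
        print(f"realtime error: {event.error.message}")
    else:
        print(f"unhandled event type: {event.type}")
```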
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py
new file mode 100644
index 00000000..8e0128d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioDeltaEvent"]
+
+
+class ResponseAudioDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    delta: str
+    """Base64-encoded audio data delta."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.audio.delta"]
+    """The event type, must be `response.audio.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py
new file mode 100644
index 00000000..68e78bc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_done_event.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioDoneEvent"]
+
+
+class ResponseAudioDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.audio.done"]
+    """The event type, must be `response.audio.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py
new file mode 100644
index 00000000..3609948d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDeltaEvent"]
+
+
+class ResponseAudioTranscriptDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    delta: str
+    """The transcript delta."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.audio_transcript.delta"]
+    """The event type, must be `response.audio_transcript.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py
new file mode 100644
index 00000000..4e4436a9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_audio_transcript_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDoneEvent"]
+
+
+class ResponseAudioTranscriptDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    transcript: str
+    """The final transcript of the audio."""
+
+    type: Literal["response.audio_transcript.done"]
+    """The event type, must be `response.audio_transcript.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py
new file mode 100644
index 00000000..c5ff991e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseCancelEvent"]
+
+
+class ResponseCancelEvent(BaseModel):
+    type: Literal["response.cancel"]
+    """The event type, must be `response.cancel`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
+
+    response_id: Optional[str] = None
+    """
+    A specific response ID to cancel - if not provided, this will cancel an in-progress
+    response in the default conversation.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py
new file mode 100644
index 00000000..f3374073
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_cancel_event_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseCancelEventParam"]
+
+
+class ResponseCancelEventParam(TypedDict, total=False):
+    type: Required[Literal["response.cancel"]]
+    """The event type, must be `response.cancel`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
+
+    response_id: str
+    """
+    A specific response ID to cancel - if not provided, this will cancel an in-progress
+    response in the default conversation.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py
new file mode 100644
index 00000000..45c8f20f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_added_event.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseContentPartAddedEvent", "Part"]
+
+
+class Part(BaseModel):
+    audio: Optional[str] = None
+    """Base64-encoded audio data (if type is "audio")."""
+
+    text: Optional[str] = None
+    """The text content (if type is "text")."""
+
+    transcript: Optional[str] = None
+    """The transcript of the audio (if type is "audio")."""
+
+    type: Optional[Literal["text", "audio"]] = None
+    """The content type ("text", "audio")."""
+
+
+class ResponseContentPartAddedEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item to which the content part was added."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    part: Part
+    """The content part that was added."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.content_part.added"]
+    """The event type, must be `response.content_part.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py
new file mode 100644
index 00000000..3d161161
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_content_part_done_event.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseContentPartDoneEvent", "Part"]
+
+
+class Part(BaseModel):
+    audio: Optional[str] = None
+    """Base64-encoded audio data (if type is "audio")."""
+
+    text: Optional[str] = None
+    """The text content (if type is "text")."""
+
+    transcript: Optional[str] = None
+    """The transcript of the audio (if type is "audio")."""
+
+    type: Optional[Literal["text", "audio"]] = None
+    """The content type ("text", "audio")."""
+
+
+class ResponseContentPartDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    part: Part
+    """The content part that is done."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.content_part.done"]
+    """The event type, must be `response.content_part.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py
new file mode 100644
index 00000000..d6c5fda9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event.py
@@ -0,0 +1,121 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.metadata import Metadata
+from .conversation_item_with_reference import ConversationItemWithReference
+
+__all__ = ["ResponseCreateEvent", "Response", "ResponseTool"]
+
+
+class ResponseTool(BaseModel):
+    description: Optional[str] = None
+    """
+    The description of the function, including guidance on when and how to call it,
+    and guidance about what to tell the user when calling (if anything).
+    """
+
+    name: Optional[str] = None
+    """The name of the function."""
+
+    parameters: Optional[object] = None
+    """Parameters of the function in JSON Schema."""
+
+    type: Optional[Literal["function"]] = None
+    """The type of the tool, i.e. `function`."""
+
+
+class Response(BaseModel):
+    conversation: Union[str, Literal["auto", "none"], None] = None
+    """Controls which conversation the response is added to.
+
+    Currently supports `auto` and `none`, with `auto` as the default value. The
+    `auto` value means that the contents of the response will be added to the
+    default conversation. Set this to `none` to create an out-of-band response which
+    will not add items to the default conversation.
+    """
+
+    input: Optional[List[ConversationItemWithReference]] = None
+    """Input items to include in the prompt for the model.
+
+    Using this field creates a new context for this Response instead of using the
+    default conversation. An empty array `[]` will clear the context for this
+    Response. Note that this can include references to items from the default
+    conversation.
+    """
+
+    instructions: Optional[str] = None
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"], None] = None
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+    """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+    temperature: Optional[float] = None
+    """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+    tool_choice: Optional[str] = None
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function, like
+    `{"type": "function", "function": {"name": "my_function"}}`.
+    """
+
+    tools: Optional[List[ResponseTool]] = None
+    """Tools (functions) available to the model."""
+
+    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer` and `verse`.
+    """
+
+
+class ResponseCreateEvent(BaseModel):
+    type: Literal["response.create"]
+    """The event type, must be `response.create`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
+
+    response: Optional[Response] = None
+    """Create a new Realtime response with these parameters"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py
new file mode 100644
index 00000000..c02fe1b3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_create_event_param.py
@@ -0,0 +1,122 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ...shared_params.metadata import Metadata
+from .conversation_item_with_reference_param import ConversationItemWithReferenceParam
+
+__all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"]
+
+
+class ResponseTool(TypedDict, total=False):
+    description: str
+    """
+    The description of the function, including guidance on when and how to call it,
+    and guidance about what to tell the user when calling (if anything).
+    """
+
+    name: str
+    """The name of the function."""
+
+    parameters: object
+    """Parameters of the function in JSON Schema."""
+
+    type: Literal["function"]
+    """The type of the tool, i.e. `function`."""
+
+
+class Response(TypedDict, total=False):
+    conversation: Union[str, Literal["auto", "none"]]
+    """Controls which conversation the response is added to.
+
+    Currently supports `auto` and `none`, with `auto` as the default value. The
+    `auto` value means that the contents of the response will be added to the
+    default conversation. Set this to `none` to create an out-of-band response which
+    will not add items to the default conversation.
+    """
+
+    input: Iterable[ConversationItemWithReferenceParam]
+    """Input items to include in the prompt for the model.
+
+    Using this field creates a new context for this Response instead of using the
+    default conversation. An empty array `[]` will clear the context for this
+    Response. Note that this can include references to items from the default
+    conversation.
+    """
+
+    instructions: str
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"]]
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    modalities: List[Literal["text", "audio"]]
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+    temperature: float
+    """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+    tool_choice: str
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function, like
+    `{"type": "function", "function": {"name": "my_function"}}`.
+    """
+
+    tools: Iterable[ResponseTool]
+    """Tools (functions) available to the model."""
+
+    voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer` and `verse`.
+    """
+
+
+class ResponseCreateEventParam(TypedDict, total=False):
+    type: Required[Literal["response.create"]]
+    """The event type, must be `response.create`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
+
+    response: Response
+    """Create a new Realtime response with these parameters"""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py
new file mode 100644
index 00000000..a4990cf0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_created_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .realtime_response import RealtimeResponse
+
+__all__ = ["ResponseCreatedEvent"]
+
+
+class ResponseCreatedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    response: RealtimeResponse
+    """The response resource."""
+
+    type: Literal["response.created"]
+    """The event type, must be `response.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py
new file mode 100644
index 00000000..9e655184
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_done_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .realtime_response import RealtimeResponse
+
+__all__ = ["ResponseDoneEvent"]
+
+
+class ResponseDoneEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    response: RealtimeResponse
+    """The response resource."""
+
+    type: Literal["response.done"]
+    """The event type, must be `response.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py
new file mode 100644
index 00000000..cdbb64e6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"]
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
+    call_id: str
+    """The ID of the function call."""
+
+    delta: str
+    """The arguments delta as a JSON string."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the function call item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.function_call_arguments.delta"]
+    """The event type, must be `response.function_call_arguments.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py
new file mode 100644
index 00000000..0a5db533
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_function_call_arguments_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDoneEvent"]
+
+
+class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
+    arguments: str
+    """The final arguments as a JSON string."""
+
+    call_id: str
+    """The ID of the function call."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the function call item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.function_call_arguments.done"]
+    """The event type, must be `response.function_call_arguments.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py
new file mode 100644
index 00000000..c89bfdc3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_added_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ResponseOutputItemAddedEvent"]
+
+
+class ResponseOutputItemAddedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item: ConversationItem
+    """The item to add to the conversation."""
+
+    output_index: int
+    """The index of the output item in the Response."""
+
+    response_id: str
+    """The ID of the Response to which the item belongs."""
+
+    type: Literal["response.output_item.added"]
+    """The event type, must be `response.output_item.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py
new file mode 100644
index 00000000..b5910e22
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_output_item_done_event.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ResponseOutputItemDoneEvent"]
+
+
+class ResponseOutputItemDoneEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item: ConversationItem
+    """The item to add to the conversation."""
+
+    output_index: int
+    """The index of the output item in the Response."""
+
+    response_id: str
+    """The ID of the Response to which the item belongs."""
+
+    type: Literal["response.output_item.done"]
+    """The event type, must be `response.output_item.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py
new file mode 100644
index 00000000..c463b3c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_delta_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseTextDeltaEvent"]
+
+
+class ResponseTextDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    delta: str
+    """The text delta."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    type: Literal["response.text.delta"]
+    """The event type, must be `response.text.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py
new file mode 100644
index 00000000..020ff41d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/response_text_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ResponseTextDoneEvent"]
+
+
+class ResponseTextDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part in the item's content array."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item in the response."""
+
+    response_id: str
+    """The ID of the response."""
+
+    text: str
+    """The final text content."""
+
+    type: Literal["response.text.done"]
+    """The event type, must be `response.text.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py
new file mode 100644
index 00000000..3ed53ff5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session.py
@@ -0,0 +1,227 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["Session", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", "TurnDetection"]
+
+
+class InputAudioNoiseReduction(BaseModel):
+    type: Optional[Literal["near_field", "far_field"]] = None
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class InputAudioTranscription(BaseModel):
+    language: Optional[str] = None
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: Optional[str] = None
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: Optional[str] = None
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+    For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class Tool(BaseModel):
+    description: Optional[str] = None
+    """
+    The description of the function, including guidance on when and how to call it,
+    and guidance about what to tell the user when calling (if anything).
+    """
+
+    name: Optional[str] = None
+    """The name of the function."""
+
+    parameters: Optional[object] = None
+    """Parameters of the function in JSON Schema."""
+
+    type: Optional[Literal["function"]] = None
+    """The type of the tool, i.e. `function`."""
+
+
+class TurnDetection(BaseModel):
+    create_response: Optional[bool] = None
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: Optional[bool] = None
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+    prefix_padding_ms: Optional[int] = None
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: Optional[int] = None
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: Optional[float] = None
+    """Used only for `server_vad` mode.
+
+    Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+    type: Optional[Literal["server_vad", "semantic_vad"]] = None
+    """Type of turn detection."""
+
+
+class Session(BaseModel):
+    id: Optional[str] = None
+    """Unique identifier for the session that looks like `sess_1234567890abcdef`."""
+
+    input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: Optional[InputAudioNoiseReduction] = None
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: Optional[InputAudioTranscription] = None
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription; these offer additional guidance to the transcription service.
+    """
+
+    instructions: Optional[str] = None
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"], None] = None
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    model: Optional[
+        Literal[
+            "gpt-4o-realtime-preview",
+            "gpt-4o-realtime-preview-2024-10-01",
+            "gpt-4o-realtime-preview-2024-12-17",
+            "gpt-4o-mini-realtime-preview",
+            "gpt-4o-mini-realtime-preview-2024-12-17",
+        ]
+    ] = None
+    """The Realtime model used for this session."""
+
+    output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+    """The format of output audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+    sampled at a rate of 24kHz.
+    """
+
+    temperature: Optional[float] = None
+    """Sampling temperature for the model, limited to [0.6, 1.2].
+
+    For audio models a temperature of 0.8 is highly recommended for best
+    performance.
+    """
+
+    tool_choice: Optional[str] = None
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function.
+    """
+
+    tools: Optional[List[Tool]] = None
+    """Tools (functions) available to the model."""
+
+    turn_detection: Optional[TurnDetection] = None
+    """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer` and `verse`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py
new file mode 100644
index 00000000..fe4a1c86
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_params.py
@@ -0,0 +1,222 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["SessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", "TurnDetection"]
+
+
+class SessionCreateParams(TypedDict, total=False):
+    input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: InputAudioNoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: InputAudioTranscription
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription; these offer additional guidance to the transcription service.
+    """
+
+    instructions: str
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"]]
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    modalities: List[Literal["text", "audio"]]
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    model: Literal[
+        "gpt-4o-realtime-preview",
+        "gpt-4o-realtime-preview-2024-10-01",
+        "gpt-4o-realtime-preview-2024-12-17",
+        "gpt-4o-mini-realtime-preview",
+        "gpt-4o-mini-realtime-preview-2024-12-17",
+    ]
+    """The Realtime model used for this session."""
+
+    output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of output audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+    sampled at a rate of 24kHz.
+    """
+
+    temperature: float
+    """Sampling temperature for the model, limited to [0.6, 1.2].
+
+    For audio models a temperature of 0.8 is highly recommended for best
+    performance.
+    """
+
+    tool_choice: str
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function.
+    """
+
+    tools: Iterable[Tool]
+    """Tools (functions) available to the model."""
+
+    turn_detection: TurnDetection
+    """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+    voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer` and `verse`.
+    """
+
+
+class InputAudioNoiseReduction(TypedDict, total=False):
+    type: Literal["near_field", "far_field"]
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class InputAudioTranscription(TypedDict, total=False):
+    language: str
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: str
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: str
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+    For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class Tool(TypedDict, total=False):
+    description: str
+    """
+    The description of the function, including guidance on when and how to call it,
+    and guidance about what to tell the user when calling (if anything).
+    """
+
+    name: str
+    """The name of the function."""
+
+    parameters: object
+    """Parameters of the function in JSON Schema."""
+
+    type: Literal["function"]
+    """The type of the tool, i.e. `function`."""
+
+
+class TurnDetection(TypedDict, total=False):
+    create_response: bool
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Literal["low", "medium", "high", "auto"]
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: bool
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+    prefix_padding_ms: int
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: int
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: float
+    """Used only for `server_vad` mode.
+
+    Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+    type: Literal["server_vad", "semantic_vad"]
+    """Type of turn detection."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py
new file mode 100644
index 00000000..c26e62be
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_create_response.py
@@ -0,0 +1,150 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["SessionCreateResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"]
+
+
+class ClientSecret(BaseModel):
+    expires_at: int
+    """Timestamp for when the token expires.
+
+    Currently, all tokens expire after one minute.
+    """
+
+    value: str
+    """
+    Ephemeral key usable in client environments to authenticate connections to the
+    Realtime API. Use this in client-side environments rather than a standard API
+    token, which should only be used server-side.
+    """
+
+
+class InputAudioTranscription(BaseModel):
+    model: Optional[str] = None
+    """
+    The model to use for transcription, `whisper-1` is the only currently supported
+    model.
+    """
+
+
+class Tool(BaseModel):
+    description: Optional[str] = None
+    """
+    The description of the function, including guidance on when and how to call it,
+    and guidance about what to tell the user when calling (if anything).
+    """
+
+    name: Optional[str] = None
+    """The name of the function."""
+
+    parameters: Optional[object] = None
+    """Parameters of the function in JSON Schema."""
+
+    type: Optional[Literal["function"]] = None
+    """The type of the tool, i.e. `function`."""
+
+
+class TurnDetection(BaseModel):
+    prefix_padding_ms: Optional[int] = None
+    """Amount of audio to include before the VAD detected speech (in milliseconds).
+
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: Optional[int] = None
+    """Duration of silence to detect speech stop (in milliseconds).
+
+    Defaults to 500ms. With shorter values the model will respond more quickly, but
+    may jump in on short pauses from the user.
+    """
+
+    threshold: Optional[float] = None
+    """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
+
+    A higher threshold will require louder audio to activate the model, and thus
+    might perform better in noisy environments.
+    """
+
+    type: Optional[str] = None
+    """Type of turn detection, only `server_vad` is currently supported."""
+
+
+class SessionCreateResponse(BaseModel):
+    client_secret: ClientSecret
+    """Ephemeral key returned by the API."""
+
+    input_audio_format: Optional[str] = None
+    """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+    input_audio_transcription: Optional[InputAudioTranscription] = None
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through Whisper and should be treated as rough guidance rather
+    than the representation understood by the model.
+    """
+
+    instructions: Optional[str] = None
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"], None] = None
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    output_audio_format: Optional[str] = None
+    """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+    temperature: Optional[float] = None
+    """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+    tool_choice: Optional[str] = None
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function.
+    """
+
+    tools: Optional[List[Tool]] = None
+    """Tools (functions) available to the model."""
+
+    turn_detection: Optional[TurnDetection] = None
+    """Configuration for turn detection.
+
+    Can be set to `null` to turn off. Server VAD means that the model will detect
+    the start and end of speech based on audio volume and respond at the end of user
+    speech.
+    """
+
+    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer` and `verse`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py
new file mode 100644
index 00000000..baf6af38
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_created_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .session import Session
+from ...._models import BaseModel
+
+__all__ = ["SessionCreatedEvent"]
+
+
+class SessionCreatedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    session: Session
+    """Realtime session object configuration."""
+
+    type: Literal["session.created"]
+    """The event type, must be `session.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py
new file mode 100644
index 00000000..00180f59
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event.py
@@ -0,0 +1,242 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = [
+    "SessionUpdateEvent",
+    "Session",
+    "SessionInputAudioNoiseReduction",
+    "SessionInputAudioTranscription",
+    "SessionTool",
+    "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(BaseModel):
+    type: Optional[Literal["near_field", "far_field"]] = None
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class SessionInputAudioTranscription(BaseModel):
+    language: Optional[str] = None
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: Optional[str] = None
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: Optional[str] = None
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+    For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class SessionTool(BaseModel):
+    description: Optional[str] = None
+    """
+    The description of the function, including guidance on when and how to call it,
+    and guidance about what to tell the user when calling (if anything).
+    """
+
+    name: Optional[str] = None
+    """The name of the function."""
+
+    parameters: Optional[object] = None
+    """Parameters of the function in JSON Schema."""
+
+    type: Optional[Literal["function"]] = None
+    """The type of the tool, i.e. `function`."""
+
+
+class SessionTurnDetection(BaseModel):
+    create_response: Optional[bool] = None
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: Optional[bool] = None
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+    prefix_padding_ms: Optional[int] = None
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: Optional[int] = None
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: Optional[float] = None
+    """Used only for `server_vad` mode.
+
+    Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+    type: Optional[Literal["server_vad", "semantic_vad"]] = None
+    """Type of turn detection."""
+
+
+class Session(BaseModel):
+    input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: Optional[SessionInputAudioNoiseReduction] = None
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: Optional[SessionInputAudioTranscription] = None
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription; these offer additional guidance to the transcription service.
+    """
+
+    instructions: Optional[str] = None
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"], None] = None
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    model: Optional[
+        Literal[
+            "gpt-4o-realtime-preview",
+            "gpt-4o-realtime-preview-2024-10-01",
+            "gpt-4o-realtime-preview-2024-12-17",
+            "gpt-4o-mini-realtime-preview",
+            "gpt-4o-mini-realtime-preview-2024-12-17",
+        ]
+    ] = None
+    """The Realtime model used for this session."""
+
+    output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+    """The format of output audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+    sampled at a rate of 24kHz.
+    """
+
+    temperature: Optional[float] = None
+    """Sampling temperature for the model, limited to [0.6, 1.2].
+
+    For audio models a temperature of 0.8 is highly recommended for best
+    performance.
+    """
+
+    tool_choice: Optional[str] = None
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function.
+    """
+
+    tools: Optional[List[SessionTool]] = None
+    """Tools (functions) available to the model."""
+
+    turn_detection: Optional[SessionTurnDetection] = None
+    """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer` and `verse`.
+    """
+
+
+class SessionUpdateEvent(BaseModel):
+    session: Session
+    """Realtime session object configuration."""
+
+    type: Literal["session.update"]
+    """The event type, must be `session.update`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py
new file mode 100644
index 00000000..b8bce8fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_update_event_param.py
@@ -0,0 +1,240 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = [
+    "SessionUpdateEventParam",
+    "Session",
+    "SessionInputAudioNoiseReduction",
+    "SessionInputAudioTranscription",
+    "SessionTool",
+    "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(TypedDict, total=False):
+    type: Literal["near_field", "far_field"]
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class SessionInputAudioTranscription(TypedDict, total=False):
+    language: str
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: str
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: str
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+    For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class SessionTool(TypedDict, total=False):
+    description: str
+    """
+    The description of the function, including guidance on when and how to call it,
+    and guidance about what to tell the user when calling (if anything).
+    """
+
+    name: str
+    """The name of the function."""
+
+    parameters: object
+    """Parameters of the function in JSON Schema."""
+
+    type: Literal["function"]
+    """The type of the tool, i.e. `function`."""
+
+
+class SessionTurnDetection(TypedDict, total=False):
+    create_response: bool
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Literal["low", "medium", "high", "auto"]
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: bool
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+    prefix_padding_ms: int
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: int
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: float
+    """Used only for `server_vad` mode.
+
+    Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+    type: Literal["server_vad", "semantic_vad"]
+    """Type of turn detection."""
+
+
+class Session(TypedDict, total=False):
+    input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: SessionInputAudioNoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: SessionInputAudioTranscription
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn it off once enabled. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription; these offer additional guidance to the transcription service.
+    """
+
+    instructions: str
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"]]
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    modalities: List[Literal["text", "audio"]]
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    model: Literal[
+        "gpt-4o-realtime-preview",
+        "gpt-4o-realtime-preview-2024-10-01",
+        "gpt-4o-realtime-preview-2024-12-17",
+        "gpt-4o-mini-realtime-preview",
+        "gpt-4o-mini-realtime-preview-2024-12-17",
+    ]
+    """The Realtime model used for this session."""
+
+    output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of output audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+    sampled at a rate of 24kHz.
+    """
+
+    temperature: float
+    """Sampling temperature for the model, limited to [0.6, 1.2].
+
+    For audio models a temperature of 0.8 is highly recommended for best
+    performance.
+    """
+
+    tool_choice: str
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function.
+    """
+
+    tools: Iterable[SessionTool]
+    """Tools (functions) available to the model."""
+
+    turn_detection: SessionTurnDetection
+    """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+    voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+    """
+
+
+class SessionUpdateEventParam(TypedDict, total=False):
+    session: Required[Session]
+    """Realtime session object configuration."""
+
+    type: Required[Literal["session.update"]]
+    """The event type, must be `session.update`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py
new file mode 100644
index 00000000..b9b6488e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/session_updated_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .session import Session
+from ...._models import BaseModel
+
+__all__ = ["SessionUpdatedEvent"]
+
+
+class SessionUpdatedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    session: Session
+    """Realtime session object configuration."""
+
+    type: Literal["session.updated"]
+    """The event type, must be `session.updated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py
new file mode 100644
index 00000000..7c7abf37
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session.py
@@ -0,0 +1,100 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["TranscriptionSession", "ClientSecret", "InputAudioTranscription", "TurnDetection"]
+
+
+class ClientSecret(BaseModel):
+    expires_at: int
+    """Timestamp for when the token expires.
+
+    Currently, all tokens expire after one minute.
+    """
+
+    value: str
+    """
+    Ephemeral key usable in client environments to authenticate connections to the
+    Realtime API. Use this in client-side environments rather than a standard API
+    token, which should only be used server-side.
+    """
+
+
+class InputAudioTranscription(BaseModel):
+    language: Optional[str] = None
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None
+    """The model to use for transcription.
+
+    Can be `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, or `whisper-1`.
+    """
+
+    prompt: Optional[str] = None
+    """An optional text to guide the model's style or continue a previous audio
+    segment.
+
+    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+    should match the audio language.
+    """
+
+
+class TurnDetection(BaseModel):
+    prefix_padding_ms: Optional[int] = None
+    """Amount of audio to include before the VAD detected speech (in milliseconds).
+
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: Optional[int] = None
+    """Duration of silence to detect speech stop (in milliseconds).
+
+    Defaults to 500ms. With shorter values the model will respond more quickly, but
+    may jump in on short pauses from the user.
+    """
+
+    threshold: Optional[float] = None
+    """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
+
+    A higher threshold will require louder audio to activate the model, and thus
+    might perform better in noisy environments.
+    """
+
+    type: Optional[str] = None
+    """Type of turn detection, only `server_vad` is currently supported."""
+
+
+class TranscriptionSession(BaseModel):
+    client_secret: ClientSecret
+    """Ephemeral key returned by the API.
+
+    Only present when the session is created on the server via REST API.
+    """
+
+    input_audio_format: Optional[str] = None
+    """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+    input_audio_transcription: Optional[InputAudioTranscription] = None
+    """Configuration of the transcription model."""
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    turn_detection: Optional[TurnDetection] = None
+    """Configuration for turn detection.
+
+    Can be set to `null` to turn off. Server VAD means that the model will detect
+    the start and end of speech based on audio volume and respond at the end of user
+    speech.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py
new file mode 100644
index 00000000..4066dc4c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_create_params.py
@@ -0,0 +1,143 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["TranscriptionSessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "TurnDetection"]
+
+
+class TranscriptionSessionCreateParams(TypedDict, total=False):
+    include: List[str]
+    """The set of items to include in the transcription. Current available items are:
+
+    - `item.input_audio_transcription.logprobs`
+    """
+
+    input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: InputAudioNoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: InputAudioTranscription
+    """Configuration for input audio transcription.
+
+    The client can optionally set the language and prompt for transcription; these
+    offer additional guidance to the transcription service.
+    """
+
+    modalities: List[Literal["text", "audio"]]
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    turn_detection: TurnDetection
+    """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+
+class InputAudioNoiseReduction(TypedDict, total=False):
+    type: Literal["near_field", "far_field"]
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class InputAudioTranscription(TypedDict, total=False):
+    language: str
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: str
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+    For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class TurnDetection(TypedDict, total=False):
+    create_response: bool
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Literal["low", "medium", "high", "auto"]
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: bool
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+    prefix_padding_ms: int
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: int
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: float
+    """Used only for `server_vad` mode.
+
+    Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+    type: Literal["server_vad", "semantic_vad"]
+    """Type of turn detection."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py
new file mode 100644
index 00000000..043ac02e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update.py
@@ -0,0 +1,160 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = [
+    "TranscriptionSessionUpdate",
+    "Session",
+    "SessionInputAudioNoiseReduction",
+    "SessionInputAudioTranscription",
+    "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(BaseModel):
+    type: Optional[Literal["near_field", "far_field"]] = None
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class SessionInputAudioTranscription(BaseModel):
+    language: Optional[str] = None
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: Optional[str] = None
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+    For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class SessionTurnDetection(BaseModel):
+    create_response: Optional[bool] = None
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: Optional[bool] = None
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+    prefix_padding_ms: Optional[int] = None
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: Optional[int] = None
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: Optional[float] = None
+    """Used only for `server_vad` mode.
+
+    Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+    type: Optional[Literal["server_vad", "semantic_vad"]] = None
+    """Type of turn detection."""
+
+
+class Session(BaseModel):
+    include: Optional[List[str]] = None
+    """The set of items to include in the transcription. Current available items are:
+
+    - `item.input_audio_transcription.logprobs`
+    """
+
+    input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: Optional[SessionInputAudioNoiseReduction] = None
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: Optional[SessionInputAudioTranscription] = None
+    """Configuration for input audio transcription.
+
+    The client can optionally set the language and prompt for transcription; these
+    offer additional guidance to the transcription service.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    turn_detection: Optional[SessionTurnDetection] = None
+    """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+
+class TranscriptionSessionUpdate(BaseModel):
+    session: Session
+    """Realtime transcription session object configuration."""
+
+    type: Literal["transcription_session.update"]
+    """The event type, must be `transcription_session.update`."""
+
+    event_id: Optional[str] = None
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py
new file mode 100644
index 00000000..997a36d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_update_param.py
@@ -0,0 +1,160 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = [
+    "TranscriptionSessionUpdateParam",
+    "Session",
+    "SessionInputAudioNoiseReduction",
+    "SessionInputAudioTranscription",
+    "SessionTurnDetection",
+]
+
+
+class SessionInputAudioNoiseReduction(TypedDict, total=False):
+    type: Literal["near_field", "far_field"]
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class SessionInputAudioTranscription(TypedDict, total=False):
+    language: str
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: str
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+    For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class SessionTurnDetection(TypedDict, total=False):
+    create_response: bool
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Literal["low", "medium", "high", "auto"]
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: bool
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+    prefix_padding_ms: int
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: int
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: float
+    """Used only for `server_vad` mode.
+
+    Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+    type: Literal["server_vad", "semantic_vad"]
+    """Type of turn detection."""
+
+
+class Session(TypedDict, total=False):
+    include: List[str]
+    """The set of items to include in the transcription. Current available items are:
+
+    - `item.input_audio_transcription.logprobs`
+    """
+
+    input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: SessionInputAudioNoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: SessionInputAudioTranscription
+    """Configuration for input audio transcription.
+
+    The client can optionally set the language and prompt for transcription; these
+    offer additional guidance to the transcription service.
+    """
+
+    modalities: List[Literal["text", "audio"]]
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    turn_detection: SessionTurnDetection
+    """Configuration for turn detection, ether Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+
+class TranscriptionSessionUpdateParam(TypedDict, total=False):
+    session: Required[Session]
+    """Realtime transcription session object configuration."""
+
+    type: Required[Literal["transcription_session.update"]]
+    """The event type, must be `transcription_session.update`."""
+
+    event_id: str
+    """Optional client-generated ID used to identify this event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py
new file mode 100644
index 00000000..ffc100bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/realtime/transcription_session_updated_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .transcription_session import TranscriptionSession
+
+__all__ = ["TranscriptionSessionUpdatedEvent"]
+
+
+class TranscriptionSessionUpdatedEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    session: TranscriptionSession
+    """A new Realtime transcription session configuration.
+
+    When a session is created on the server via REST API, the session object also
+    contains an ephemeral key. Default TTL for keys is one minute. This property is
+    not present when a session is updated via the WebSocket API.
+    """
+
+    type: Literal["transcription_session.updated"]
+    """The event type, must be `transcription_session.updated`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread.py
new file mode 100644
index 00000000..789f66e4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread.py
@@ -0,0 +1,63 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.metadata import Metadata
+
+__all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ToolResourcesCodeInterpreter(BaseModel):
+    file_ids: Optional[List[str]] = None
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+    available to the `code_interpreter` tool. There can be a maximum of 20 files
+    associated with the tool.
+    """
+
+
+class ToolResourcesFileSearch(BaseModel):
+    vector_store_ids: Optional[List[str]] = None
+    """
+    The
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this thread. There can be a maximum of 1 vector store attached to
+    the thread.
+    """
+
+
+class ToolResources(BaseModel):
+    code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+
+    file_search: Optional[ToolResourcesFileSearch] = None
+
+
+class Thread(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the thread was created."""
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    object: Literal["thread"]
+    """The object type, which is always `thread`."""
+
+    tool_resources: Optional[ToolResources] = None
+    """
+    A set of resources that are made available to the assistant's tools in this
+    thread. The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py
new file mode 100644
index 00000000..065c390f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_and_run_params.py
@@ -0,0 +1,401 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared.chat_model import ChatModel
+from .function_tool_param import FunctionToolParam
+from .file_search_tool_param import FileSearchToolParam
+from ..shared_params.metadata import Metadata
+from .code_interpreter_tool_param import CodeInterpreterToolParam
+from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
+from .threads.message_content_part_param import MessageContentPartParam
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = [
+    "ThreadCreateAndRunParamsBase",
+    "Thread",
+    "ThreadMessage",
+    "ThreadMessageAttachment",
+    "ThreadMessageAttachmentTool",
+    "ThreadMessageAttachmentToolFileSearch",
+    "ThreadToolResources",
+    "ThreadToolResourcesCodeInterpreter",
+    "ThreadToolResourcesFileSearch",
+    "ThreadToolResourcesFileSearchVectorStore",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategy",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+    "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
+    "ToolResources",
+    "ToolResourcesCodeInterpreter",
+    "ToolResourcesFileSearch",
+    "Tool",
+    "TruncationStrategy",
+    "ThreadCreateAndRunParamsNonStreaming",
+    "ThreadCreateAndRunParamsStreaming",
+]
+
+
+class ThreadCreateAndRunParamsBase(TypedDict, total=False):
+    assistant_id: Required[str]
+    """
+    The ID of the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+    execute this run.
+    """
+
+    instructions: Optional[str]
+    """Override the default system message of the assistant.
+
+    This is useful for modifying the behavior on a per-run basis.
+    """
+
+    max_completion_tokens: Optional[int]
+    """
+    The maximum number of completion tokens that may be used over the course of the
+    run. The run will make a best effort to use only the number of completion tokens
+    specified, across multiple turns of the run. If the run exceeds the number of
+    completion tokens specified, the run will end with status `incomplete`. See
+    `incomplete_details` for more info.
+    """
+
+    max_prompt_tokens: Optional[int]
+    """The maximum number of prompt tokens that may be used over the course of the run.
+
+    The run will make a best effort to use only the number of prompt tokens
+    specified, across multiple turns of the run. If the run exceeds the number of
+    prompt tokens specified, the run will end with status `incomplete`. See
+    `incomplete_details` for more info.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: Union[str, ChatModel, None]
+    """
+    The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+    be used to execute this run. If a value is provided here, it will override the
+    model associated with the assistant. If not, the model associated with the
+    assistant will be used.
+    """
+
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+    during tool use.
+    """
+
+    response_format: Optional[AssistantResponseFormatOptionParam]
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    thread: Thread
+    """Options to create a new thread.
+
+    If no thread is provided when running a request, an empty thread will be
+    created.
+    """
+
+    tool_choice: Optional[AssistantToolChoiceOptionParam]
+    """
+    Controls which (if any) tool is called by the model. `none` means the model will
+    not call any tools and instead generates a message. `auto` is the default value
+    and means the model can pick between generating a message or calling one or more
+    tools. `required` means the model must call one or more tools before responding
+    to the user. Specifying a particular tool like `{"type": "file_search"}` or
+    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+    call that tool.
+    """
+
+    tool_resources: Optional[ToolResources]
+    """A set of resources that are used by the assistant's tools.
+
+    The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+    tools: Optional[Iterable[Tool]]
+    """Override the tools the assistant can use for this run.
+
+    This is useful for modifying the behavior on a per-run basis.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
+
+    truncation_strategy: Optional[TruncationStrategy]
+    """Controls for how a thread will be truncated prior to the run.
+
+    Use this to control the initial context window of the run.
+    """
+
+
+class ThreadMessageAttachmentToolFileSearch(TypedDict, total=False):
+    type: Required[Literal["file_search"]]
+    """The type of tool being defined: `file_search`"""
+
+
+ThreadMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, ThreadMessageAttachmentToolFileSearch]
+
+
+class ThreadMessageAttachment(TypedDict, total=False):
+    file_id: str
+    """The ID of the file to attach to the message."""
+
+    tools: Iterable[ThreadMessageAttachmentTool]
+    """The tools to add this file to."""
+
+
+class ThreadMessage(TypedDict, total=False):
+    content: Required[Union[str, Iterable[MessageContentPartParam]]]
+    """The text contents of the message."""
+
+    role: Required[Literal["user", "assistant"]]
+    """The role of the entity that is creating the message. Allowed values include:
+
+    - `user`: Indicates the message is sent by an actual user and should be used in
+      most cases to represent user-generated messages.
+    - `assistant`: Indicates the message is generated by the assistant. Use this
+      value to insert messages from the assistant into the conversation.
+    """
+
+    attachments: Optional[Iterable[ThreadMessageAttachment]]
+    """A list of files attached to the message, and the tools they should be added to."""
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+
+class ThreadToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+    available to the `code_interpreter` tool. There can be a maximum of 20 files
+    associated with the tool.
+    """
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+    chunk_overlap_tokens: Required[int]
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: Required[int]
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
+
+
+class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+    static: Required[ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""
+
+
+ThreadToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+    ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto,
+    ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic,
+]
+
+
+class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
+    chunking_strategy: ThreadToolResourcesFileSearchVectorStoreChunkingStrategy
+    """The chunking strategy used to chunk the file(s).
+
+    If not set, will use the `auto` strategy.
+    """
+
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+    add to the vector store. There can be a maximum of 10000 files in a vector
+    store.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+
+class ThreadToolResourcesFileSearch(TypedDict, total=False):
+    vector_store_ids: List[str]
+    """
+    The
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this thread. There can be a maximum of 1 vector store attached to
+    the thread.
+    """
+
+    vector_stores: Iterable[ThreadToolResourcesFileSearchVectorStore]
+    """
+    A helper to create a
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    with file_ids and attach it to this thread. There can be a maximum of 1 vector
+    store attached to the thread.
+    """
+
+
+class ThreadToolResources(TypedDict, total=False):
+    code_interpreter: ThreadToolResourcesCodeInterpreter
+
+    file_search: ThreadToolResourcesFileSearch
+
+
+class Thread(TypedDict, total=False):
+    messages: Iterable[ThreadMessage]
+    """
+    A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+    start the thread with.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    tool_resources: Optional[ThreadToolResources]
+    """
+    A set of resources that are made available to the assistant's tools in this
+    thread. The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+    available to the `code_interpreter` tool. There can be a maximum of 20 files
+    associated with the tool.
+    """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+    vector_store_ids: List[str]
+    """
+    The ID of the
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this assistant. There can be a maximum of 1 vector store attached to
+    the assistant.
+    """
+
+
+class ToolResources(TypedDict, total=False):
+    code_interpreter: ToolResourcesCodeInterpreter
+
+    file_search: ToolResourcesFileSearch
+
+
+Tool: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
+
+
+class TruncationStrategy(TypedDict, total=False):
+    type: Required[Literal["auto", "last_messages"]]
+    """The truncation strategy to use for the thread.
+
+    The default is `auto`. If set to `last_messages`, the thread will be truncated
+    to the n most recent messages in the thread. When set to `auto`, messages in the
+    middle of the thread will be dropped to fit the context length of the model,
+    `max_prompt_tokens`.
+    """
+
+    last_messages: Optional[int]
+    """
+    The number of most recent messages from the thread when constructing the context
+    for the run.
+    """
+
+
+class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If `true`, returns a stream of events that happen during the Run as server-sent
+    events, terminating when the Run enters a terminal state with a `data: [DONE]`
+    message.
+    """
+
+
+class ThreadCreateAndRunParamsStreaming(ThreadCreateAndRunParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If `true`, returns a stream of events that happen during the Run as server-sent
+    events, terminating when the Run enters a terminal state with a `data: [DONE]`
+    message.
+    """
+
+
+ThreadCreateAndRunParams = Union[ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming]
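The streaming and non-streaming variants differ only in the `stream` flag; a sketch of a non-streaming payload built as a plain dict, with a placeholder assistant ID:

from openai.types.beta.thread_create_and_run_params import ThreadCreateAndRunParamsNonStreaming

params: ThreadCreateAndRunParamsNonStreaming = {
    "assistant_id": "asst_abc123",  # placeholder ID
    "thread": {
        "messages": [
            {"role": "user", "content": "Summarize the attached report."},
        ],
    },
    "max_completion_tokens": 512,
    "truncation_strategy": {"type": "last_messages", "last_messages": 10},
    "stream": False,
}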
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py
new file mode 100644
index 00000000..ec1ccf19
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_create_params.py
@@ -0,0 +1,185 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared_params.metadata import Metadata
+from .code_interpreter_tool_param import CodeInterpreterToolParam
+from .threads.message_content_part_param import MessageContentPartParam
+
+__all__ = [
+    "ThreadCreateParams",
+    "Message",
+    "MessageAttachment",
+    "MessageAttachmentTool",
+    "MessageAttachmentToolFileSearch",
+    "ToolResources",
+    "ToolResourcesCodeInterpreter",
+    "ToolResourcesFileSearch",
+    "ToolResourcesFileSearchVectorStore",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategy",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
+    "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
+]
+
+
+class ThreadCreateParams(TypedDict, total=False):
+    messages: Iterable[Message]
+    """
+    A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+    start the thread with.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    tool_resources: Optional[ToolResources]
+    """
+    A set of resources that are made available to the assistant's tools in this
+    thread. The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+
+class MessageAttachmentToolFileSearch(TypedDict, total=False):
+    type: Required[Literal["file_search"]]
+    """The type of tool being defined: `file_search`"""
+
+
+MessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, MessageAttachmentToolFileSearch]
+
+
+class MessageAttachment(TypedDict, total=False):
+    file_id: str
+    """The ID of the file to attach to the message."""
+
+    tools: Iterable[MessageAttachmentTool]
+    """The tools to add this file to."""
+
+
+class Message(TypedDict, total=False):
+    content: Required[Union[str, Iterable[MessageContentPartParam]]]
+    """The text contents of the message."""
+
+    role: Required[Literal["user", "assistant"]]
+    """The role of the entity that is creating the message. Allowed values include:
+
+    - `user`: Indicates the message is sent by an actual user and should be used in
+      most cases to represent user-generated messages.
+    - `assistant`: Indicates the message is generated by the assistant. Use this
+      value to insert messages from the assistant into the conversation.
+    """
+
+    attachments: Optional[Iterable[MessageAttachment]]
+    """A list of files attached to the message, and the tools they should be added to."""
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+    available to the `code_interpreter` tool. There can be a maximum of 20 files
+    associated with the tool.
+    """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+    chunk_overlap_tokens: Required[int]
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: Required[int]
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
+
+
+class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+    static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""
+
+
+ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
+    ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
+]
+
+
+class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
+    chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
+    """The chunking strategy used to chunk the file(s).
+
+    If not set, will use the `auto` strategy.
+    """
+
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+    add to the vector store. There can be a maximum of 10000 files in a vector
+    store.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+    vector_store_ids: List[str]
+    """
+    The
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this thread. There can be a maximum of 1 vector store attached to
+    the thread.
+    """
+
+    vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
+    """
+    A helper to create a
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    with file_ids and attach it to this thread. There can be a maximum of 1 vector
+    store attached to the thread.
+    """
+
+
+class ToolResources(TypedDict, total=False):
+    code_interpreter: ToolResourcesCodeInterpreter
+
+    file_search: ToolResourcesFileSearch
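A sketch of thread-creation params with an attached file and a file-search vector store (the file and vector store IDs are placeholders):

from openai.types.beta.thread_create_params import ThreadCreateParams

params: ThreadCreateParams = {
    "messages": [
        {
            "role": "user",
            "content": "Please review this file.",
            "attachments": [
                {"file_id": "file-abc123", "tools": [{"type": "file_search"}]},
            ],
        },
    ],
    "tool_resources": {
        "file_search": {"vector_store_ids": ["vs_abc123"]},
    },
}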
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py
new file mode 100644
index 00000000..d3856263
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ThreadDeleted"]
+
+
+class ThreadDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["thread.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py
new file mode 100644
index 00000000..b47ea8f3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/thread_update_params.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ThreadUpdateParams(TypedDict, total=False):
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    tool_resources: Optional[ToolResources]
+    """
+    A set of resources that are made available to the assistant's tools in this
+    thread. The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+    available to the `code_interpreter` tool. There can be a maximum of 20 files
+    associated with the tool.
+    """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+    vector_store_ids: List[str]
+    """
+    The
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    attached to this thread. There can be a maximum of 1 vector store attached to
+    the thread.
+    """
+
+
+class ToolResources(TypedDict, total=False):
+    code_interpreter: ToolResourcesCodeInterpreter
+
+    file_search: ToolResourcesFileSearch
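
These update params map onto keyword arguments; an illustrative sketch (placeholder thread ID, with metadata keys and values inside the 64/512-character limits described above):

from openai import OpenAI

client = OpenAI()
thread = client.beta.threads.update(
    "thread_abc123",  # placeholder ID
    metadata={"user": "jane", "topic": "billing"},  # up to 16 key-value pairs
)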
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py
new file mode 100644
index 00000000..70853177
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/__init__.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .run import Run as Run
+from .text import Text as Text
+from .message import Message as Message
+from .image_url import ImageURL as ImageURL
+from .annotation import Annotation as Annotation
+from .image_file import ImageFile as ImageFile
+from .run_status import RunStatus as RunStatus
+from .text_delta import TextDelta as TextDelta
+from .message_delta import MessageDelta as MessageDelta
+from .image_url_delta import ImageURLDelta as ImageURLDelta
+from .image_url_param import ImageURLParam as ImageURLParam
+from .message_content import MessageContent as MessageContent
+from .message_deleted import MessageDeleted as MessageDeleted
+from .run_list_params import RunListParams as RunListParams
+from .annotation_delta import AnnotationDelta as AnnotationDelta
+from .image_file_delta import ImageFileDelta as ImageFileDelta
+from .image_file_param import ImageFileParam as ImageFileParam
+from .text_delta_block import TextDeltaBlock as TextDeltaBlock
+from .run_create_params import RunCreateParams as RunCreateParams
+from .run_update_params import RunUpdateParams as RunUpdateParams
+from .text_content_block import TextContentBlock as TextContentBlock
+from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent
+from .message_list_params import MessageListParams as MessageListParams
+from .refusal_delta_block import RefusalDeltaBlock as RefusalDeltaBlock
+from .file_path_annotation import FilePathAnnotation as FilePathAnnotation
+from .image_url_delta_block import ImageURLDeltaBlock as ImageURLDeltaBlock
+from .message_content_delta import MessageContentDelta as MessageContentDelta
+from .message_create_params import MessageCreateParams as MessageCreateParams
+from .message_update_params import MessageUpdateParams as MessageUpdateParams
+from .refusal_content_block import RefusalContentBlock as RefusalContentBlock
+from .image_file_delta_block import ImageFileDeltaBlock as ImageFileDeltaBlock
+from .image_url_content_block import ImageURLContentBlock as ImageURLContentBlock
+from .file_citation_annotation import FileCitationAnnotation as FileCitationAnnotation
+from .image_file_content_block import ImageFileContentBlock as ImageFileContentBlock
+from .text_content_block_param import TextContentBlockParam as TextContentBlockParam
+from .file_path_delta_annotation import FilePathDeltaAnnotation as FilePathDeltaAnnotation
+from .message_content_part_param import MessageContentPartParam as MessageContentPartParam
+from .image_url_content_block_param import ImageURLContentBlockParam as ImageURLContentBlockParam
+from .file_citation_delta_annotation import FileCitationDeltaAnnotation as FileCitationDeltaAnnotation
+from .image_file_content_block_param import ImageFileContentBlockParam as ImageFileContentBlockParam
+from .run_submit_tool_outputs_params import RunSubmitToolOutputsParams as RunSubmitToolOutputsParams
+from .required_action_function_tool_call import RequiredActionFunctionToolCall as RequiredActionFunctionToolCall
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py
new file mode 100644
index 00000000..13c10abf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .file_path_annotation import FilePathAnnotation
+from .file_citation_annotation import FileCitationAnnotation
+
+__all__ = ["Annotation"]
+
+Annotation: TypeAlias = Annotated[Union[FileCitationAnnotation, FilePathAnnotation], PropertyInfo(discriminator="type")]
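
Because `Annotation` is a union discriminated on `type`, code walking a message's text content can branch on that field; a hedged sketch, assuming `message` is an already retrieved `Message` whose first content block is a text block:

for annotation in message.content[0].text.annotations:
    if annotation.type == "file_citation":
        print(annotation.text, "->", annotation.file_citation.file_id)
    elif annotation.type == "file_path":
        print(annotation.text, "->", annotation.file_path.file_id)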
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py
new file mode 100644
index 00000000..c7c6c898
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/annotation_delta.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .file_path_delta_annotation import FilePathDeltaAnnotation
+from .file_citation_delta_annotation import FileCitationDeltaAnnotation
+
+__all__ = ["AnnotationDelta"]
+
+AnnotationDelta: TypeAlias = Annotated[
+    Union[FileCitationDeltaAnnotation, FilePathDeltaAnnotation], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py
new file mode 100644
index 00000000..c3085aed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_annotation.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FileCitationAnnotation", "FileCitation"]
+
+
+class FileCitation(BaseModel):
+    file_id: str
+    """The ID of the specific File the citation is from."""
+
+
+class FileCitationAnnotation(BaseModel):
+    end_index: int
+
+    file_citation: FileCitation
+
+    start_index: int
+
+    text: str
+    """The text in the message content that needs to be replaced."""
+
+    type: Literal["file_citation"]
+    """Always `file_citation`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py
new file mode 100644
index 00000000..b40c0d12
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_citation_delta_annotation.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FileCitationDeltaAnnotation", "FileCitation"]
+
+
+class FileCitation(BaseModel):
+    file_id: Optional[str] = None
+    """The ID of the specific File the citation is from."""
+
+    quote: Optional[str] = None
+    """The specific quote in the file."""
+
+
+class FileCitationDeltaAnnotation(BaseModel):
+    index: int
+    """The index of the annotation in the text content part."""
+
+    type: Literal["file_citation"]
+    """Always `file_citation`."""
+
+    end_index: Optional[int] = None
+
+    file_citation: Optional[FileCitation] = None
+
+    start_index: Optional[int] = None
+
+    text: Optional[str] = None
+    """The text in the message content that needs to be replaced."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py
new file mode 100644
index 00000000..9812737e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_annotation.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FilePathAnnotation", "FilePath"]
+
+
+class FilePath(BaseModel):
+    file_id: str
+    """The ID of the file that was generated."""
+
+
+class FilePathAnnotation(BaseModel):
+    end_index: int
+
+    file_path: FilePath
+
+    start_index: int
+
+    text: str
+    """The text in the message content that needs to be replaced."""
+
+    type: Literal["file_path"]
+    """Always `file_path`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py
new file mode 100644
index 00000000..0cbb445e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/file_path_delta_annotation.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FilePathDeltaAnnotation", "FilePath"]
+
+
+class FilePath(BaseModel):
+    file_id: Optional[str] = None
+    """The ID of the file that was generated."""
+
+
+class FilePathDeltaAnnotation(BaseModel):
+    index: int
+    """The index of the annotation in the text content part."""
+
+    type: Literal["file_path"]
+    """Always `file_path`."""
+
+    end_index: Optional[int] = None
+
+    file_path: Optional[FilePath] = None
+
+    start_index: Optional[int] = None
+
+    text: Optional[str] = None
+    """The text in the message content that needs to be replaced."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py
new file mode 100644
index 00000000..6000d975
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageFile"]
+
+
+class ImageFile(BaseModel):
+    file_id: str
+    """
+    The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+    in the message content. Set `purpose="vision"` when uploading the File if you
+    need to later display the file content.
+    """
+
+    detail: Optional[Literal["auto", "low", "high"]] = None
+    """Specifies the detail level of the image if specified by the user.
+
+    `low` uses fewer tokens; you can opt in to high resolution using `high`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py
new file mode 100644
index 00000000..a9099990
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .image_file import ImageFile
+
+__all__ = ["ImageFileContentBlock"]
+
+
+class ImageFileContentBlock(BaseModel):
+    image_file: ImageFile
+
+    type: Literal["image_file"]
+    """Always `image_file`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py
new file mode 100644
index 00000000..48d94bee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_content_block_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .image_file_param import ImageFileParam
+
+__all__ = ["ImageFileContentBlockParam"]
+
+
+class ImageFileContentBlockParam(TypedDict, total=False):
+    image_file: Required[ImageFileParam]
+
+    type: Required[Literal["image_file"]]
+    """Always `image_file`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py
new file mode 100644
index 00000000..4581184c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageFileDelta"]
+
+
+class ImageFileDelta(BaseModel):
+    detail: Optional[Literal["auto", "low", "high"]] = None
+    """Specifies the detail level of the image if specified by the user.
+
+    `low` uses fewer tokens; you can opt in to high resolution using `high`.
+    """
+
+    file_id: Optional[str] = None
+    """
+    The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+    in the message content. Set `purpose="vision"` when uploading the File if you
+    need to later display the file content.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py
new file mode 100644
index 00000000..0a5a2e8a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_delta_block.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .image_file_delta import ImageFileDelta
+
+__all__ = ["ImageFileDeltaBlock"]
+
+
+class ImageFileDeltaBlock(BaseModel):
+    index: int
+    """The index of the content part in the message."""
+
+    type: Literal["image_file"]
+    """Always `image_file`."""
+
+    image_file: Optional[ImageFileDelta] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py
new file mode 100644
index 00000000..e4a85358
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_file_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ImageFileParam"]
+
+
+class ImageFileParam(TypedDict, total=False):
+    file_id: Required[str]
+    """
+    The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+    in the message content. Set `purpose="vision"` when uploading the File if you
+    need to later display the file content.
+    """
+
+    detail: Literal["auto", "low", "high"]
+    """Specifies the detail level of the image if specified by the user.
+
+    `low` uses fewer tokens; you can opt in to high resolution using `high`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py
new file mode 100644
index 00000000..d1fac147
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageURL"]
+
+
+class ImageURL(BaseModel):
+    url: str
+    """
+    The external URL of the image; must be one of the supported image types: jpeg,
+    jpg, png, gif, webp.
+    """
+
+    detail: Optional[Literal["auto", "low", "high"]] = None
+    """Specifies the detail level of the image.
+
+    `low` uses fewer tokens; you can opt in to high resolution using `high`. Default
+    value is `auto`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py
new file mode 100644
index 00000000..40a16c1d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .image_url import ImageURL
+from ...._models import BaseModel
+
+__all__ = ["ImageURLContentBlock"]
+
+
+class ImageURLContentBlock(BaseModel):
+    image_url: ImageURL
+
+    type: Literal["image_url"]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py
new file mode 100644
index 00000000..585b926c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_content_block_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .image_url_param import ImageURLParam
+
+__all__ = ["ImageURLContentBlockParam"]
+
+
+class ImageURLContentBlockParam(TypedDict, total=False):
+    image_url: Required[ImageURLParam]
+
+    type: Required[Literal["image_url"]]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py
new file mode 100644
index 00000000..e4026719
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ImageURLDelta"]
+
+
+class ImageURLDelta(BaseModel):
+    detail: Optional[Literal["auto", "low", "high"]] = None
+    """Specifies the detail level of the image.
+
+    `low` uses fewer tokens; you can opt in to high resolution using `high`.
+    """
+
+    url: Optional[str] = None
+    """
+    The URL of the image; must be one of the supported image types: jpeg, jpg,
+    png, gif, webp.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py
new file mode 100644
index 00000000..5252da12
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_delta_block.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .image_url_delta import ImageURLDelta
+
+__all__ = ["ImageURLDeltaBlock"]
+
+
+class ImageURLDeltaBlock(BaseModel):
+    index: int
+    """The index of the content part in the message."""
+
+    type: Literal["image_url"]
+    """Always `image_url`."""
+
+    image_url: Optional[ImageURLDelta] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py
new file mode 100644
index 00000000..6b7e427e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/image_url_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ImageURLParam"]
+
+
+class ImageURLParam(TypedDict, total=False):
+    url: Required[str]
+    """
+    The external URL of the image; must be one of the supported image types: jpeg,
+    jpg, png, gif, webp.
+    """
+
+    detail: Literal["auto", "low", "high"]
+    """Specifies the detail level of the image.
+
+    `low` uses fewer tokens; you can opt in to high resolution using `high`. Default
+    value is `auto`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py
new file mode 100644
index 00000000..4a05a128
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message.py
@@ -0,0 +1,103 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ...._models import BaseModel
+from .message_content import MessageContent
+from ...shared.metadata import Metadata
+from ..code_interpreter_tool import CodeInterpreterTool
+
+__all__ = [
+    "Message",
+    "Attachment",
+    "AttachmentTool",
+    "AttachmentToolAssistantToolsFileSearchTypeOnly",
+    "IncompleteDetails",
+]
+
+
+class AttachmentToolAssistantToolsFileSearchTypeOnly(BaseModel):
+    type: Literal["file_search"]
+    """The type of tool being defined: `file_search`"""
+
+
+AttachmentTool: TypeAlias = Union[CodeInterpreterTool, AttachmentToolAssistantToolsFileSearchTypeOnly]
+
+
+class Attachment(BaseModel):
+    file_id: Optional[str] = None
+    """The ID of the file to attach to the message."""
+
+    tools: Optional[List[AttachmentTool]] = None
+    """The tools to add this file to."""
+
+
+class IncompleteDetails(BaseModel):
+    reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"]
+    """The reason the message is incomplete."""
+
+
+class Message(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    assistant_id: Optional[str] = None
+    """
+    If applicable, the ID of the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants) that
+    authored this message.
+    """
+
+    attachments: Optional[List[Attachment]] = None
+    """A list of files attached to the message, and the tools they were added to."""
+
+    completed_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the message was completed."""
+
+    content: List[MessageContent]
+    """The content of the message in array of text and/or images."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the message was created."""
+
+    incomplete_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the message was marked as incomplete."""
+
+    incomplete_details: Optional[IncompleteDetails] = None
+    """On an incomplete message, details about why the message is incomplete."""
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    object: Literal["thread.message"]
+    """The object type, which is always `thread.message`."""
+
+    role: Literal["user", "assistant"]
+    """The entity that produced the message. One of `user` or `assistant`."""
+
+    run_id: Optional[str] = None
+    """
+    The ID of the [run](https://platform.openai.com/docs/api-reference/runs)
+    associated with the creation of this message. Value is `null` when messages are
+    created manually using the create message or create thread endpoints.
+    """
+
+    status: Literal["in_progress", "incomplete", "completed"]
+    """
+    The status of the message, which can be either `in_progress`, `incomplete`, or
+    `completed`.
+    """
+
+    thread_id: str
+    """
+    The [thread](https://platform.openai.com/docs/api-reference/threads) ID that
+    this message belongs to.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py
new file mode 100644
index 00000000..9523c1e1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .text_content_block import TextContentBlock
+from .refusal_content_block import RefusalContentBlock
+from .image_url_content_block import ImageURLContentBlock
+from .image_file_content_block import ImageFileContentBlock
+
+__all__ = ["MessageContent"]
+
+
+MessageContent: TypeAlias = Annotated[
+    Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py
new file mode 100644
index 00000000..b6e7dfa4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_delta.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from .text_delta_block import TextDeltaBlock
+from .refusal_delta_block import RefusalDeltaBlock
+from .image_url_delta_block import ImageURLDeltaBlock
+from .image_file_delta_block import ImageFileDeltaBlock
+
+__all__ = ["MessageContentDelta"]
+
+MessageContentDelta: TypeAlias = Annotated[
+    Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py
new file mode 100644
index 00000000..dc09a01c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_content_part_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .text_content_block_param import TextContentBlockParam
+from .image_url_content_block_param import ImageURLContentBlockParam
+from .image_file_content_block_param import ImageFileContentBlockParam
+
+__all__ = ["MessageContentPartParam"]
+
+MessageContentPartParam: TypeAlias = Union[ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py
new file mode 100644
index 00000000..b5238682
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_create_params.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ...shared_params.metadata import Metadata
+from .message_content_part_param import MessageContentPartParam
+from ..code_interpreter_tool_param import CodeInterpreterToolParam
+
+__all__ = ["MessageCreateParams", "Attachment", "AttachmentTool", "AttachmentToolFileSearch"]
+
+
+class MessageCreateParams(TypedDict, total=False):
+    content: Required[Union[str, Iterable[MessageContentPartParam]]]
+    """The text contents of the message."""
+
+    role: Required[Literal["user", "assistant"]]
+    """The role of the entity that is creating the message. Allowed values include:
+
+    - `user`: Indicates the message is sent by an actual user and should be used in
+      most cases to represent user-generated messages.
+    - `assistant`: Indicates the message is generated by the assistant. Use this
+      value to insert messages from the assistant into the conversation.
+    """
+
+    attachments: Optional[Iterable[Attachment]]
+    """A list of files attached to the message, and the tools they should be added to."""
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+
+class AttachmentToolFileSearch(TypedDict, total=False):
+    type: Required[Literal["file_search"]]
+    """The type of tool being defined: `file_search`"""
+
+
+AttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, AttachmentToolFileSearch]
+
+
+class Attachment(TypedDict, total=False):
+    file_id: str
+    """The ID of the file to attach to the message."""
+
+    tools: Iterable[AttachmentTool]
+    """The tools to add this file to."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py
new file mode 100644
index 00000000..48210777
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["MessageDeleted"]
+
+
+class MessageDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["thread.message.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py
new file mode 100644
index 00000000..ecd0dfe3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .message_content_delta import MessageContentDelta
+
+__all__ = ["MessageDelta"]
+
+
+class MessageDelta(BaseModel):
+    content: Optional[List[MessageContentDelta]] = None
+    """The content of the message in array of text and/or images."""
+
+    role: Optional[Literal["user", "assistant"]] = None
+    """The entity that produced the message. One of `user` or `assistant`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py
new file mode 100644
index 00000000..3811cef6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_delta_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .message_delta import MessageDelta
+
+__all__ = ["MessageDeltaEvent"]
+
+
+class MessageDeltaEvent(BaseModel):
+    id: str
+    """The identifier of the message, which can be referenced in API endpoints."""
+
+    delta: MessageDelta
+    """The delta containing the fields that have changed on the Message."""
+
+    object: Literal["thread.message.delta"]
+    """The object type, which is always `thread.message.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py
new file mode 100644
index 00000000..a7c22a66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_list_params.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["MessageListParams"]
+
+
+class MessageListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
+
+    run_id: str
+    """Filter messages by the run ID that generated them."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py
new file mode 100644
index 00000000..bb078281
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/message_update_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...shared_params.metadata import Metadata
+
+__all__ = ["MessageUpdateParams"]
+
+
+class MessageUpdateParams(TypedDict, total=False):
+    thread_id: Required[str]
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py
new file mode 100644
index 00000000..d54f9485
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_content_block.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalContentBlock"]
+
+
+class RefusalContentBlock(BaseModel):
+    refusal: str
+
+    type: Literal["refusal"]
+    """Always `refusal`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py
new file mode 100644
index 00000000..dbd8e626
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/refusal_delta_block.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalDeltaBlock"]
+
+
+class RefusalDeltaBlock(BaseModel):
+    index: int
+    """The index of the refusal part in the message."""
+
+    type: Literal["refusal"]
+    """Always `refusal`."""
+
+    refusal: Optional[str] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py
new file mode 100644
index 00000000..a24dfd06
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/required_action_function_tool_call.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RequiredActionFunctionToolCall", "Function"]
+
+
+class Function(BaseModel):
+    arguments: str
+    """The arguments that the model expects you to pass to the function."""
+
+    name: str
+    """The name of the function."""
+
+
+class RequiredActionFunctionToolCall(BaseModel):
+    id: str
+    """The ID of the tool call.
+
+    This ID must be referenced when you submit the tool outputs using the
+    [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+    endpoint.
+    """
+
+    function: Function
+    """The function definition."""
+
+    type: Literal["function"]
+    """The type of tool call the output is required for.
+
+    For now, this is always `function`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py
new file mode 100644
index 00000000..da9418d6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run.py
@@ -0,0 +1,245 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .run_status import RunStatus
+from ..assistant_tool import AssistantTool
+from ...shared.metadata import Metadata
+from ..assistant_tool_choice_option import AssistantToolChoiceOption
+from ..assistant_response_format_option import AssistantResponseFormatOption
+from .required_action_function_tool_call import RequiredActionFunctionToolCall
+
+__all__ = [
+    "Run",
+    "IncompleteDetails",
+    "LastError",
+    "RequiredAction",
+    "RequiredActionSubmitToolOutputs",
+    "TruncationStrategy",
+    "Usage",
+]
+
+
+class IncompleteDetails(BaseModel):
+    reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None
+    """The reason why the run is incomplete.
+
+    This will point to which specific token limit was reached over the course of the
+    run.
+    """
+
+
+class LastError(BaseModel):
+    code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
+    """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""
+
+    message: str
+    """A human-readable description of the error."""
+
+
+class RequiredActionSubmitToolOutputs(BaseModel):
+    tool_calls: List[RequiredActionFunctionToolCall]
+    """A list of the relevant tool calls."""
+
+
+class RequiredAction(BaseModel):
+    submit_tool_outputs: RequiredActionSubmitToolOutputs
+    """Details on the tool outputs needed for this run to continue."""
+
+    type: Literal["submit_tool_outputs"]
+    """For now, this is always `submit_tool_outputs`."""
+
+
+class TruncationStrategy(BaseModel):
+    type: Literal["auto", "last_messages"]
+    """The truncation strategy to use for the thread.
+
+    The default is `auto`. If set to `last_messages`, the thread will be truncated
+    to the n most recent messages in the thread. When set to `auto`, messages in the
+    middle of the thread will be dropped to fit the context length of the model,
+    `max_prompt_tokens`.
+    """
+
+    last_messages: Optional[int] = None
+    """
+    The number of most recent messages from the thread when constructing the context
+    for the run.
+    """
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of completion tokens used over the course of the run."""
+
+    prompt_tokens: int
+    """Number of prompt tokens used over the course of the run."""
+
+    total_tokens: int
+    """Total number of tokens used (prompt + completion)."""
+
+
+class Run(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    assistant_id: str
+    """
+    The ID of the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+    execution of this run.
+    """
+
+    cancelled_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run was cancelled."""
+
+    completed_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run was completed."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the run was created."""
+
+    expires_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run will expire."""
+
+    failed_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run failed."""
+
+    incomplete_details: Optional[IncompleteDetails] = None
+    """Details on why the run is incomplete.
+
+    Will be `null` if the run is not incomplete.
+    """
+
+    instructions: str
+    """
+    The instructions that the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+    this run.
+    """
+
+    last_error: Optional[LastError] = None
+    """The last error associated with this run. Will be `null` if there are no errors."""
+
+    max_completion_tokens: Optional[int] = None
+    """
+    The maximum number of completion tokens specified to have been used over the
+    course of the run.
+    """
+
+    max_prompt_tokens: Optional[int] = None
+    """
+    The maximum number of prompt tokens specified to have been used over the course
+    of the run.
+    """
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: str
+    """
+    The model that the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+    this run.
+    """
+
+    object: Literal["thread.run"]
+    """The object type, which is always `thread.run`."""
+
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+    during tool use.
+    """
+
+    required_action: Optional[RequiredAction] = None
+    """Details on the action required to continue the run.
+
+    Will be `null` if no action is required.
+    """
+
+    response_format: Optional[AssistantResponseFormatOption] = None
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    started_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run was started."""
+
+    status: RunStatus
+    """
+    The status of the run, which can be either `queued`, `in_progress`,
+    `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
+    `incomplete`, or `expired`.
+    """
+
+    thread_id: str
+    """
+    The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
+    that was executed on as a part of this run.
+    """
+
+    tool_choice: Optional[AssistantToolChoiceOption] = None
+    """
+    Controls which (if any) tool is called by the model. `none` means the model will
+    not call any tools and instead generates a message. `auto` is the default value
+    and means the model can pick between generating a message or calling one or more
+    tools. `required` means the model must call one or more tools before responding
+    to the user. Specifying a particular tool like `{"type": "file_search"}` or
+    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+    call that tool.
+    """
+
+    tools: List[AssistantTool]
+    """
+    The list of tools that the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+    this run.
+    """
+
+    truncation_strategy: Optional[TruncationStrategy] = None
+    """Controls for how a thread will be truncated prior to the run.
+
+    Use this to control the initial context window of the run.
+    """
+
+    usage: Optional[Usage] = None
+    """Usage statistics related to the run.
+
+    This value will be `null` if the run is not in a terminal state (i.e.
+    `in_progress`, `queued`, etc.).
+    """
+
+    temperature: Optional[float] = None
+    """The sampling temperature used for this run. If not set, defaults to 1."""
+
+    top_p: Optional[float] = None
+    """The nucleus sampling value used for this run. If not set, defaults to 1."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py
new file mode 100644
index 00000000..fc702278
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_create_params.py
@@ -0,0 +1,261 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ...shared.chat_model import ChatModel
+from ..assistant_tool_param import AssistantToolParam
+from .runs.run_step_include import RunStepInclude
+from ...shared_params.metadata import Metadata
+from ...shared.reasoning_effort import ReasoningEffort
+from .message_content_part_param import MessageContentPartParam
+from ..code_interpreter_tool_param import CodeInterpreterToolParam
+from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
+from ..assistant_response_format_option_param import AssistantResponseFormatOptionParam
+
+__all__ = [
+    "RunCreateParamsBase",
+    "AdditionalMessage",
+    "AdditionalMessageAttachment",
+    "AdditionalMessageAttachmentTool",
+    "AdditionalMessageAttachmentToolFileSearch",
+    "TruncationStrategy",
+    "RunCreateParamsNonStreaming",
+    "RunCreateParamsStreaming",
+]
+
+
+class RunCreateParamsBase(TypedDict, total=False):
+    assistant_id: Required[str]
+    """
+    The ID of the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+    execute this run.
+    """
+
+    include: List[RunStepInclude]
+    """A list of additional fields to include in the response.
+
+    Currently the only supported value is
+    `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+    search result content.
+
+    See the
+    [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+    additional_instructions: Optional[str]
+    """Appends additional instructions at the end of the instructions for the run.
+
+    This is useful for modifying the behavior on a per-run basis without overriding
+    other instructions.
+    """
+
+    additional_messages: Optional[Iterable[AdditionalMessage]]
+    """Adds additional messages to the thread before creating the run."""
+
+    instructions: Optional[str]
+    """
+    Overrides the
+    [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+    of the assistant. This is useful for modifying the behavior on a per-run basis.
+    """
+
+    max_completion_tokens: Optional[int]
+    """
+    The maximum number of completion tokens that may be used over the course of the
+    run. The run will make a best effort to use only the number of completion tokens
+    specified, across multiple turns of the run. If the run exceeds the number of
+    completion tokens specified, the run will end with status `incomplete`. See
+    `incomplete_details` for more info.
+    """
+
+    max_prompt_tokens: Optional[int]
+    """The maximum number of prompt tokens that may be used over the course of the run.
+
+    The run will make a best effort to use only the number of prompt tokens
+    specified, across multiple turns of the run. If the run exceeds the number of
+    prompt tokens specified, the run will end with status `incomplete`. See
+    `incomplete_details` for more info.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: Union[str, ChatModel, None]
+    """
+    The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+    be used to execute this run. If a value is provided here, it will override the
+    model associated with the assistant. If not, the model associated with the
+    assistant will be used.
+    """
+
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+    during tool use.
+    """
+
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    response_format: Optional[AssistantResponseFormatOptionParam]
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_choice: Optional[AssistantToolChoiceOptionParam]
+    """
+    Controls which (if any) tool is called by the model. `none` means the model will
+    not call any tools and instead generates a message. `auto` is the default value
+    and means the model can pick between generating a message or calling one or more
+    tools. `required` means the model must call one or more tools before responding
+    to the user. Specifying a particular tool like `{"type": "file_search"}` or
+    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+    call that tool.
+    """
+
+    tools: Optional[Iterable[AssistantToolParam]]
+    """Override the tools the assistant can use for this run.
+
+    This is useful for modifying the behavior on a per-run basis.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
+
+    truncation_strategy: Optional[TruncationStrategy]
+    """Controls for how a thread will be truncated prior to the run.
+
+    Use this to control the initial context window of the run.
+    """
+
+
+class AdditionalMessageAttachmentToolFileSearch(TypedDict, total=False):
+    type: Required[Literal["file_search"]]
+    """The type of tool being defined: `file_search`"""
+
+
+AdditionalMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, AdditionalMessageAttachmentToolFileSearch]
+
+
+class AdditionalMessageAttachment(TypedDict, total=False):
+    file_id: str
+    """The ID of the file to attach to the message."""
+
+    tools: Iterable[AdditionalMessageAttachmentTool]
+    """The tools to add this file to."""
+
+
+class AdditionalMessage(TypedDict, total=False):
+    content: Required[Union[str, Iterable[MessageContentPartParam]]]
+    """The text contents of the message."""
+
+    role: Required[Literal["user", "assistant"]]
+    """The role of the entity that is creating the message. Allowed values include:
+
+    - `user`: Indicates the message is sent by an actual user and should be used in
+      most cases to represent user-generated messages.
+    - `assistant`: Indicates the message is generated by the assistant. Use this
+      value to insert messages from the assistant into the conversation.
+    """
+
+    attachments: Optional[Iterable[AdditionalMessageAttachment]]
+    """A list of files attached to the message, and the tools they should be added to."""
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+
+class TruncationStrategy(TypedDict, total=False):
+    type: Required[Literal["auto", "last_messages"]]
+    """The truncation strategy to use for the thread.
+
+    The default is `auto`. If set to `last_messages`, the thread will be truncated
+    to the n most recent messages in the thread. When set to `auto`, messages in the
+    middle of the thread will be dropped to fit the context length of the model,
+    `max_prompt_tokens`.
+    """
+
+    last_messages: Optional[int]
+    """
+    The number of most recent messages from the thread to include when constructing
+    the context for the run.
+    """
+
+
+class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If `true`, returns a stream of events that happen during the Run as server-sent
+    events, terminating when the Run enters a terminal state with a `data: [DONE]`
+    message.
+    """
+
+
+class RunCreateParamsStreaming(RunCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If `true`, returns a stream of events that happen during the Run as server-sent
+    events, terminating when the Run enters a terminal state with a `data: [DONE]`
+    message.
+    """
+
+
+RunCreateParams = Union[RunCreateParamsNonStreaming, RunCreateParamsStreaming]
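The params above map onto `client.beta.threads.runs.create()`. A minimal sketch of a call that exercises `tool_choice` and `truncation_strategy`; the IDs and values are placeholders rather than anything taken from this diff:

    # Hedged sketch: thread_abc123 / asst_abc123 are placeholder IDs.
    from openai import OpenAI

    client = OpenAI()

    run = client.beta.threads.runs.create(
        thread_id="thread_abc123",
        assistant_id="asst_abc123",
        # Force the file_search tool instead of letting the model choose.
        tool_choice={"type": "file_search"},
        # Keep only the 10 most recent messages in the run's initial context.
        truncation_strategy={"type": "last_messages", "last_messages": 10},
        temperature=0.2,
    )
    print(run.id, run.status)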
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py
new file mode 100644
index 00000000..fbea54f6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["RunListParams"]
+
+
+class RunListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py
new file mode 100644
index 00000000..47c7cbd0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_status.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["RunStatus"]
+
+RunStatus: TypeAlias = Literal[
+    "queued",
+    "in_progress",
+    "requires_action",
+    "cancelling",
+    "cancelled",
+    "failed",
+    "completed",
+    "incomplete",
+    "expired",
+]
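A hedged sketch of polling a run until its `RunStatus` leaves the active states; the one-second sleep and the placeholder IDs are arbitrary choices, not part of the SDK:

    import time
    from openai import OpenAI

    client = OpenAI()

    ACTIVE = {"queued", "in_progress", "cancelling"}

    run = client.beta.threads.runs.retrieve(run_id="run_abc123", thread_id="thread_abc123")
    while run.status in ACTIVE:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(run_id="run_abc123", thread_id="thread_abc123")
    print(run.status)  # e.g. "completed", "requires_action", "failed", ...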
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py
new file mode 100644
index 00000000..14772860
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = [
+    "RunSubmitToolOutputsParamsBase",
+    "ToolOutput",
+    "RunSubmitToolOutputsParamsNonStreaming",
+    "RunSubmitToolOutputsParamsStreaming",
+]
+
+
+class RunSubmitToolOutputsParamsBase(TypedDict, total=False):
+    thread_id: Required[str]
+
+    tool_outputs: Required[Iterable[ToolOutput]]
+    """A list of tools for which the outputs are being submitted."""
+
+
+class ToolOutput(TypedDict, total=False):
+    output: str
+    """The output of the tool call to be submitted to continue the run."""
+
+    tool_call_id: str
+    """
+    The ID of the tool call in the `required_action` object within the run object
+    the output is being submitted for.
+    """
+
+
+class RunSubmitToolOutputsParamsNonStreaming(RunSubmitToolOutputsParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If `true`, returns a stream of events that happen during the Run as server-sent
+    events, terminating when the Run enters a terminal state with a `data: [DONE]`
+    message.
+    """
+
+
+class RunSubmitToolOutputsParamsStreaming(RunSubmitToolOutputsParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If `true`, returns a stream of events that happen during the Run as server-sent
+    events, terminating when the Run enters a terminal state with a `data: [DONE]`
+    message.
+    """
+
+
+RunSubmitToolOutputsParams = Union[RunSubmitToolOutputsParamsNonStreaming, RunSubmitToolOutputsParamsStreaming]
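When a run reports `requires_action`, these params are what `client.beta.threads.runs.submit_tool_outputs()` expects. A hedged sketch that echoes a placeholder result for every pending tool call:

    from openai import OpenAI

    client = OpenAI()

    run = client.beta.threads.runs.retrieve(run_id="run_abc123", thread_id="thread_abc123")
    if run.status == "requires_action" and run.required_action is not None:
        outputs = [
            {"tool_call_id": call.id, "output": "42"}  # placeholder result
            for call in run.required_action.submit_tool_outputs.tool_calls
        ]
        run = client.beta.threads.runs.submit_tool_outputs(
            run_id="run_abc123",
            thread_id="thread_abc123",
            tool_outputs=outputs,
        )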
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py
new file mode 100644
index 00000000..fbcbd3fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/run_update_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...shared_params.metadata import Metadata
+
+__all__ = ["RunUpdateParams"]
+
+
+class RunUpdateParams(TypedDict, total=False):
+    thread_id: Required[str]
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py
new file mode 100644
index 00000000..467d5d79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/__init__.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .run_step import RunStep as RunStep
+from .tool_call import ToolCall as ToolCall
+from .run_step_delta import RunStepDelta as RunStepDelta
+from .tool_call_delta import ToolCallDelta as ToolCallDelta
+from .run_step_include import RunStepInclude as RunStepInclude
+from .step_list_params import StepListParams as StepListParams
+from .function_tool_call import FunctionToolCall as FunctionToolCall
+from .run_step_delta_event import RunStepDeltaEvent as RunStepDeltaEvent
+from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams
+from .code_interpreter_logs import CodeInterpreterLogs as CodeInterpreterLogs
+from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall
+from .tool_call_delta_object import ToolCallDeltaObject as ToolCallDeltaObject
+from .tool_calls_step_details import ToolCallsStepDetails as ToolCallsStepDetails
+from .function_tool_call_delta import FunctionToolCallDelta as FunctionToolCallDelta
+from .code_interpreter_tool_call import CodeInterpreterToolCall as CodeInterpreterToolCall
+from .file_search_tool_call_delta import FileSearchToolCallDelta as FileSearchToolCallDelta
+from .run_step_delta_message_delta import RunStepDeltaMessageDelta as RunStepDeltaMessageDelta
+from .code_interpreter_output_image import CodeInterpreterOutputImage as CodeInterpreterOutputImage
+from .message_creation_step_details import MessageCreationStepDetails as MessageCreationStepDetails
+from .code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py
new file mode 100644
index 00000000..0bf8c1da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_logs.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["CodeInterpreterLogs"]
+
+
+class CodeInterpreterLogs(BaseModel):
+    index: int
+    """The index of the output in the outputs array."""
+
+    type: Literal["logs"]
+    """Always `logs`."""
+
+    logs: Optional[str] = None
+    """The text output from the Code Interpreter tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py
new file mode 100644
index 00000000..2257f37e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_output_image.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["CodeInterpreterOutputImage", "Image"]
+
+
+class Image(BaseModel):
+    file_id: Optional[str] = None
+    """
+    The [file](https://platform.openai.com/docs/api-reference/files) ID of the
+    image.
+    """
+
+
+class CodeInterpreterOutputImage(BaseModel):
+    index: int
+    """The index of the output in the outputs array."""
+
+    type: Literal["image"]
+    """Always `image`."""
+
+    image: Optional[Image] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py
new file mode 100644
index 00000000..e7df4e19
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+
+__all__ = [
+    "CodeInterpreterToolCall",
+    "CodeInterpreter",
+    "CodeInterpreterOutput",
+    "CodeInterpreterOutputLogs",
+    "CodeInterpreterOutputImage",
+    "CodeInterpreterOutputImageImage",
+]
+
+
+class CodeInterpreterOutputLogs(BaseModel):
+    logs: str
+    """The text output from the Code Interpreter tool call."""
+
+    type: Literal["logs"]
+    """Always `logs`."""
+
+
+class CodeInterpreterOutputImageImage(BaseModel):
+    file_id: str
+    """
+    The [file](https://platform.openai.com/docs/api-reference/files) ID of the
+    image.
+    """
+
+
+class CodeInterpreterOutputImage(BaseModel):
+    image: CodeInterpreterOutputImageImage
+
+    type: Literal["image"]
+    """Always `image`."""
+
+
+CodeInterpreterOutput: TypeAlias = Annotated[
+    Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage], PropertyInfo(discriminator="type")
+]
+
+
+class CodeInterpreter(BaseModel):
+    input: str
+    """The input to the Code Interpreter tool call."""
+
+    outputs: List[CodeInterpreterOutput]
+    """The outputs from the Code Interpreter tool call.
+
+    Code Interpreter can output one or more items, including text (`logs`) or images
+    (`image`). Each of these is represented by a different object type.
+    """
+
+
+class CodeInterpreterToolCall(BaseModel):
+    id: str
+    """The ID of the tool call."""
+
+    code_interpreter: CodeInterpreter
+    """The Code Interpreter tool call definition."""
+
+    type: Literal["code_interpreter"]
+    """The type of tool call.
+
+    This is always going to be `code_interpreter` for this type of tool call.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py
new file mode 100644
index 00000000..9d7a1563
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+from .code_interpreter_logs import CodeInterpreterLogs
+from .code_interpreter_output_image import CodeInterpreterOutputImage
+
+__all__ = ["CodeInterpreterToolCallDelta", "CodeInterpreter", "CodeInterpreterOutput"]
+
+CodeInterpreterOutput: TypeAlias = Annotated[
+    Union[CodeInterpreterLogs, CodeInterpreterOutputImage], PropertyInfo(discriminator="type")
+]
+
+
+class CodeInterpreter(BaseModel):
+    input: Optional[str] = None
+    """The input to the Code Interpreter tool call."""
+
+    outputs: Optional[List[CodeInterpreterOutput]] = None
+    """The outputs from the Code Interpreter tool call.
+
+    Code Interpreter can output one or more items, including text (`logs`) or images
+    (`image`). Each of these is represented by a different object type.
+    """
+
+
+class CodeInterpreterToolCallDelta(BaseModel):
+    index: int
+    """The index of the tool call in the tool calls array."""
+
+    type: Literal["code_interpreter"]
+    """The type of tool call.
+
+    This is always going to be `code_interpreter` for this type of tool call.
+    """
+
+    id: Optional[str] = None
+    """The ID of the tool call."""
+
+    code_interpreter: Optional[CodeInterpreter] = None
+    """The Code Interpreter tool call definition."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py
new file mode 100644
index 00000000..a2068daa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = [
+    "FileSearchToolCall",
+    "FileSearch",
+    "FileSearchRankingOptions",
+    "FileSearchResult",
+    "FileSearchResultContent",
+]
+
+
+class FileSearchRankingOptions(BaseModel):
+    ranker: Literal["auto", "default_2024_08_21"]
+    """The ranker to use for the file search.
+
+    If not specified, the `auto` ranker will be used.
+    """
+
+    score_threshold: float
+    """The score threshold for the file search.
+
+    The value must be a floating point number between 0 and 1.
+    """
+
+
+class FileSearchResultContent(BaseModel):
+    text: Optional[str] = None
+    """The text content of the file."""
+
+    type: Optional[Literal["text"]] = None
+    """The type of the content."""
+
+
+class FileSearchResult(BaseModel):
+    file_id: str
+    """The ID of the file that result was found in."""
+
+    file_name: str
+    """The name of the file that result was found in."""
+
+    score: float
+    """The score of the result.
+
+    The value must be a floating point number between 0 and 1.
+    """
+
+    content: Optional[List[FileSearchResultContent]] = None
+    """The content of the result that was found.
+
+    The content is only included if requested via the include query parameter.
+    """
+
+
+class FileSearch(BaseModel):
+    ranking_options: Optional[FileSearchRankingOptions] = None
+    """The ranking options for the file search."""
+
+    results: Optional[List[FileSearchResult]] = None
+    """The results of the file search."""
+
+
+class FileSearchToolCall(BaseModel):
+    id: str
+    """The ID of the tool call object."""
+
+    file_search: FileSearch
+    """For now, this is always going to be an empty object."""
+
+    type: Literal["file_search"]
+    """The type of tool call.
+
+    This is always going to be `file_search` for this type of tool call.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py
new file mode 100644
index 00000000..df5ac217
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/file_search_tool_call_delta.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["FileSearchToolCallDelta"]
+
+
+class FileSearchToolCallDelta(BaseModel):
+    file_search: object
+    """For now, this is always going to be an empty object."""
+
+    index: int
+    """The index of the tool call in the tool calls array."""
+
+    type: Literal["file_search"]
+    """The type of tool call.
+
+    This is always going to be `file_search` for this type of tool call.
+    """
+
+    id: Optional[str] = None
+    """The ID of the tool call object."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py
new file mode 100644
index 00000000..b1d354f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call.py
@@ -0,0 +1,38 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["FunctionToolCall", "Function"]
+
+
+class Function(BaseModel):
+    arguments: str
+    """The arguments passed to the function."""
+
+    name: str
+    """The name of the function."""
+
+    output: Optional[str] = None
+    """The output of the function.
+
+    This will be `null` if the outputs have not been
+    [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+    yet.
+    """
+
+
+class FunctionToolCall(BaseModel):
+    id: str
+    """The ID of the tool call object."""
+
+    function: Function
+    """The definition of the function that was called."""
+
+    type: Literal["function"]
+    """The type of tool call.
+
+    This is always going to be `function` for this type of tool call.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py
new file mode 100644
index 00000000..faaf026f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/function_tool_call_delta.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["FunctionToolCallDelta", "Function"]
+
+
+class Function(BaseModel):
+    arguments: Optional[str] = None
+    """The arguments passed to the function."""
+
+    name: Optional[str] = None
+    """The name of the function."""
+
+    output: Optional[str] = None
+    """The output of the function.
+
+    This will be `null` if the outputs have not been
+    [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+    yet.
+    """
+
+
+class FunctionToolCallDelta(BaseModel):
+    index: int
+    """The index of the tool call in the tool calls array."""
+
+    type: Literal["function"]
+    """The type of tool call.
+
+    This is always going to be `function` for this type of tool call.
+    """
+
+    id: Optional[str] = None
+    """The ID of the tool call object."""
+
+    function: Optional[Function] = None
+    """The definition of the function that was called."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py
new file mode 100644
index 00000000..73439079
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/message_creation_step_details.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["MessageCreationStepDetails", "MessageCreation"]
+
+
+class MessageCreation(BaseModel):
+    message_id: str
+    """The ID of the message that was created by this run step."""
+
+
+class MessageCreationStepDetails(BaseModel):
+    message_creation: MessageCreation
+
+    type: Literal["message_creation"]
+    """Always `message_creation`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py
new file mode 100644
index 00000000..b5f380c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step.py
@@ -0,0 +1,115 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+from ....shared.metadata import Metadata
+from .tool_calls_step_details import ToolCallsStepDetails
+from .message_creation_step_details import MessageCreationStepDetails
+
+__all__ = ["RunStep", "LastError", "StepDetails", "Usage"]
+
+
+class LastError(BaseModel):
+    code: Literal["server_error", "rate_limit_exceeded"]
+    """One of `server_error` or `rate_limit_exceeded`."""
+
+    message: str
+    """A human-readable description of the error."""
+
+
+StepDetails: TypeAlias = Annotated[
+    Union[MessageCreationStepDetails, ToolCallsStepDetails], PropertyInfo(discriminator="type")
+]
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of completion tokens used over the course of the run step."""
+
+    prompt_tokens: int
+    """Number of prompt tokens used over the course of the run step."""
+
+    total_tokens: int
+    """Total number of tokens used (prompt + completion)."""
+
+
+class RunStep(BaseModel):
+    id: str
+    """The identifier of the run step, which can be referenced in API endpoints."""
+
+    assistant_id: str
+    """
+    The ID of the
+    [assistant](https://platform.openai.com/docs/api-reference/assistants)
+    associated with the run step.
+    """
+
+    cancelled_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run step was cancelled."""
+
+    completed_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run step completed."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the run step was created."""
+
+    expired_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run step expired.
+
+    A step is considered expired if the parent run is expired.
+    """
+
+    failed_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run step failed."""
+
+    last_error: Optional[LastError] = None
+    """The last error associated with this run step.
+
+    Will be `null` if there are no errors.
+    """
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    object: Literal["thread.run.step"]
+    """The object type, which is always `thread.run.step`."""
+
+    run_id: str
+    """
+    The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that
+    this run step is a part of.
+    """
+
+    status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
+    """
+    The status of the run step, which can be either `in_progress`, `cancelled`,
+    `failed`, `completed`, or `expired`.
+    """
+
+    step_details: StepDetails
+    """The details of the run step."""
+
+    thread_id: str
+    """
+    The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
+    that was run.
+    """
+
+    type: Literal["message_creation", "tool_calls"]
+    """The type of run step, which can be either `message_creation` or `tool_calls`."""
+
+    usage: Optional[Usage] = None
+    """Usage statistics related to the run step.
+
+    This value will be `null` while the run step's status is `in_progress`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py
new file mode 100644
index 00000000..1139088f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from ....._models import BaseModel
+from .tool_call_delta_object import ToolCallDeltaObject
+from .run_step_delta_message_delta import RunStepDeltaMessageDelta
+
+__all__ = ["RunStepDelta", "StepDetails"]
+
+StepDetails: TypeAlias = Annotated[
+    Union[RunStepDeltaMessageDelta, ToolCallDeltaObject], PropertyInfo(discriminator="type")
+]
+
+
+class RunStepDelta(BaseModel):
+    step_details: Optional[StepDetails] = None
+    """The details of the run step."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py
new file mode 100644
index 00000000..7f3f92aa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+from .run_step_delta import RunStepDelta
+
+__all__ = ["RunStepDeltaEvent"]
+
+
+class RunStepDeltaEvent(BaseModel):
+    id: str
+    """The identifier of the run step, which can be referenced in API endpoints."""
+
+    delta: RunStepDelta
+    """The delta containing the fields that have changed on the run step."""
+
+    object: Literal["thread.run.step.delta"]
+    """The object type, which is always `thread.run.step.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py
new file mode 100644
index 00000000..f58ed3d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_delta_message_delta.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["RunStepDeltaMessageDelta", "MessageCreation"]
+
+
+class MessageCreation(BaseModel):
+    message_id: Optional[str] = None
+    """The ID of the message that was created by this run step."""
+
+
+class RunStepDeltaMessageDelta(BaseModel):
+    type: Literal["message_creation"]
+    """Always `message_creation`."""
+
+    message_creation: Optional[MessageCreation] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py
new file mode 100644
index 00000000..8e76c1b7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/run_step_include.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["RunStepInclude"]
+
+RunStepInclude: TypeAlias = Literal["step_details.tool_calls[*].file_search.results[*].content"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py
new file mode 100644
index 00000000..a6be771d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_list_params.py
@@ -0,0 +1,56 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from .run_step_include import RunStepInclude
+
+__all__ = ["StepListParams"]
+
+
+class StepListParams(TypedDict, total=False):
+    thread_id: Required[str]
+
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    include: List[RunStepInclude]
+    """A list of additional fields to include in the response.
+
+    Currently the only supported value is
+    `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+    search result content.
+
+    See the
+    [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py
new file mode 100644
index 00000000..ecbb72ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/step_retrieve_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+from .run_step_include import RunStepInclude
+
+__all__ = ["StepRetrieveParams"]
+
+
+class StepRetrieveParams(TypedDict, total=False):
+    thread_id: Required[str]
+
+    run_id: Required[str]
+
+    include: List[RunStepInclude]
+    """A list of additional fields to include in the response.
+
+    Currently the only supported value is
+    `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+    search result content.
+
+    See the
+    [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py
new file mode 100644
index 00000000..565e3109
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from .function_tool_call import FunctionToolCall
+from .file_search_tool_call import FileSearchToolCall
+from .code_interpreter_tool_call import CodeInterpreterToolCall
+
+__all__ = ["ToolCall"]
+
+ToolCall: TypeAlias = Annotated[
+    Union[CodeInterpreterToolCall, FileSearchToolCall, FunctionToolCall], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py
new file mode 100644
index 00000000..f0b8070c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ....._utils import PropertyInfo
+from .function_tool_call_delta import FunctionToolCallDelta
+from .file_search_tool_call_delta import FileSearchToolCallDelta
+from .code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta
+
+__all__ = ["ToolCallDelta"]
+
+ToolCallDelta: TypeAlias = Annotated[
+    Union[CodeInterpreterToolCallDelta, FileSearchToolCallDelta, FunctionToolCallDelta],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py
new file mode 100644
index 00000000..189dce77
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_call_delta_object.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+from .tool_call_delta import ToolCallDelta
+
+__all__ = ["ToolCallDeltaObject"]
+
+
+class ToolCallDeltaObject(BaseModel):
+    type: Literal["tool_calls"]
+    """Always `tool_calls`."""
+
+    tool_calls: Optional[List[ToolCallDelta]] = None
+    """An array of tool calls the run step was involved in.
+
+    These can be associated with one of three types of tools: `code_interpreter`,
+    `file_search`, or `function`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py
new file mode 100644
index 00000000..a084d387
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from .tool_call import ToolCall
+from ....._models import BaseModel
+
+__all__ = ["ToolCallsStepDetails"]
+
+
+class ToolCallsStepDetails(BaseModel):
+    tool_calls: List[ToolCall]
+    """An array of tool calls the run step was involved in.
+
+    These can be associated with one of three types of tools: `code_interpreter`,
+    `file_search`, or `function`.
+    """
+
+    type: Literal["tool_calls"]
+    """Always `tool_calls`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py
new file mode 100644
index 00000000..853bec29
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ...._models import BaseModel
+from .annotation import Annotation
+
+__all__ = ["Text"]
+
+
+class Text(BaseModel):
+    annotations: List[Annotation]
+
+    value: str
+    """The data that makes up the text."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py
new file mode 100644
index 00000000..3706d6b9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .text import Text
+from ...._models import BaseModel
+
+__all__ = ["TextContentBlock"]
+
+
+class TextContentBlock(BaseModel):
+    text: Text
+
+    type: Literal["text"]
+    """Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py
new file mode 100644
index 00000000..6313de32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_content_block_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["TextContentBlockParam"]
+
+
+class TextContentBlockParam(TypedDict, total=False):
+    text: Required[str]
+    """Text content to be sent to the model"""
+
+    type: Required[Literal["text"]]
+    """Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py
new file mode 100644
index 00000000..09cd3570
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from .annotation_delta import AnnotationDelta
+
+__all__ = ["TextDelta"]
+
+
+class TextDelta(BaseModel):
+    annotations: Optional[List[AnnotationDelta]] = None
+
+    value: Optional[str] = None
+    """The data that makes up the text."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py
new file mode 100644
index 00000000..586116e0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/beta/threads/text_delta_block.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .text_delta import TextDelta
+
+__all__ = ["TextDeltaBlock"]
+
+
+class TextDeltaBlock(BaseModel):
+    index: int
+    """The index of the content part in the message."""
+
+    type: Literal["text"]
+    """Always `text`."""
+
+    text: Optional[TextDelta] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/chat/__init__.py
new file mode 100644
index 00000000..b4f43b29
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/__init__.py
@@ -0,0 +1,71 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .chat_completion import ChatCompletion as ChatCompletion
+from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
+from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .completion_list_params import CompletionListParams as CompletionListParams
+from .parsed_chat_completion import (
+    ParsedChoice as ParsedChoice,
+    ParsedChatCompletion as ParsedChatCompletion,
+    ParsedChatCompletionMessage as ParsedChatCompletionMessage,
+)
+from .chat_completion_deleted import ChatCompletionDeleted as ChatCompletionDeleted
+from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
+from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams
+from .parsed_function_tool_call import (
+    ParsedFunction as ParsedFunction,
+    ParsedFunctionToolCall as ParsedFunctionToolCall,
+)
+from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
+from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
+from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
+from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
+from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
+from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
+from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam
+from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam
+from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam
+from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam
+from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam
+from .chat_completion_function_message_param import (
+    ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,
+)
+from .chat_completion_assistant_message_param import (
+    ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,
+)
+from .chat_completion_content_part_text_param import (
+    ChatCompletionContentPartTextParam as ChatCompletionContentPartTextParam,
+)
+from .chat_completion_developer_message_param import (
+    ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,
+)
+from .chat_completion_message_tool_call_param import (
+    ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam,
+)
+from .chat_completion_named_tool_choice_param import (
+    ChatCompletionNamedToolChoiceParam as ChatCompletionNamedToolChoiceParam,
+)
+from .chat_completion_content_part_image_param import (
+    ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam,
+)
+from .chat_completion_prediction_content_param import (
+    ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam,
+)
+from .chat_completion_tool_choice_option_param import (
+    ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
+)
+from .chat_completion_content_part_refusal_param import (
+    ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam,
+)
+from .chat_completion_function_call_option_param import (
+    ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam,
+)
+from .chat_completion_content_part_input_audio_param import (
+    ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py
new file mode 100644
index 00000000..cb812a27
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion.py
@@ -0,0 +1,73 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..completion_usage import CompletionUsage
+from .chat_completion_message import ChatCompletionMessage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, `length` if the maximum number of tokens specified in the request was
+    reached, `content_filter` if content was omitted due to a flag from our content
+    filters, `tool_calls` if the model called a tool, or `function_call`
+    (deprecated) if the model called a function.
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+    message: ChatCompletionMessage
+    """A chat completion message generated by the model."""
+
+
+class ChatCompletion(BaseModel):
+    id: str
+    """A unique identifier for the chat completion."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can be more than one if `n` is greater than 1.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created."""
+
+    model: str
+    """The model used for the chat completion."""
+
+    object: Literal["chat.completion"]
+    """The object type, which is always `chat.completion`."""
+
+    service_tier: Optional[Literal["scale", "default"]] = None
+    """The service tier used for processing the request."""
+
+    system_fingerprint: Optional[str] = None
+    """This fingerprint represents the backend configuration that the model runs with.
+
+    Can be used in conjunction with the `seed` request parameter to understand when
+    backend changes have been made that might impact determinism.
+    """
+
+    usage: Optional[CompletionUsage] = None
+    """Usage statistics for the completion request."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py
new file mode 100644
index 00000000..35e3a3d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_assistant_message_param.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam
+from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam
+
+__all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"]
+
+
+class Audio(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for a previous audio response from the model."""
+
+
+ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam]
+
+
+class FunctionCall(TypedDict, total=False):
+    arguments: Required[str]
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Required[str]
+    """The name of the function to call."""
+
+
+class ChatCompletionAssistantMessageParam(TypedDict, total=False):
+    role: Required[Literal["assistant"]]
+    """The role of the messages author, in this case `assistant`."""
+
+    audio: Optional[Audio]
+    """Data about a previous audio response from the model.
+
+    [Learn more](https://platform.openai.com/docs/guides/audio).
+    """
+
+    content: Union[str, Iterable[ContentArrayOfContentPart], None]
+    """The contents of the assistant message.
+
+    Required unless `tool_calls` or `function_call` is specified.
+    """
+
+    function_call: Optional[FunctionCall]
+    """Deprecated and replaced by `tool_calls`.
+
+    The name and arguments of a function that should be called, as generated by the
+    model.
+    """
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
+
+    refusal: Optional[str]
+    """The refusal message by the assistant."""
+
+    tool_calls: Iterable[ChatCompletionMessageToolCallParam]
+    """The tool calls generated by the model, such as function calls."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py
new file mode 100644
index 00000000..dd15508e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionAudio"]
+
+
+class ChatCompletionAudio(BaseModel):
+    id: str
+    """Unique identifier for this audio response."""
+
+    data: str
+    """
+    Base64 encoded audio bytes generated by the model, in the format specified in
+    the request.
+    """
+
+    expires_at: int
+    """
+    The Unix timestamp (in seconds) for when this audio response will no longer be
+    accessible on the server for use in multi-turn conversations.
+    """
+
+    transcript: str
+    """Transcript of the audio generated by the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py
new file mode 100644
index 00000000..63214178
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_audio_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionAudioParam"]
+
+
+class ChatCompletionAudioParam(TypedDict, total=False):
+    format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
+    """Specifies the output audio format.
+
+    Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
+    """
+
+    voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
+    """The voice the model uses to respond.
+
+    Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
+    `shimmer`, and `verse`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py
new file mode 100644
index 00000000..31b9cb54
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_chunk.py
@@ -0,0 +1,150 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..completion_usage import CompletionUsage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+    "ChatCompletionChunk",
+    "Choice",
+    "ChoiceDelta",
+    "ChoiceDeltaFunctionCall",
+    "ChoiceDeltaToolCall",
+    "ChoiceDeltaToolCallFunction",
+    "ChoiceLogprobs",
+]
+
+
+class ChoiceDeltaFunctionCall(BaseModel):
+    arguments: Optional[str] = None
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Optional[str] = None
+    """The name of the function to call."""
+
+
+class ChoiceDeltaToolCallFunction(BaseModel):
+    arguments: Optional[str] = None
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Optional[str] = None
+    """The name of the function to call."""
+
+
+class ChoiceDeltaToolCall(BaseModel):
+    index: int
+
+    id: Optional[str] = None
+    """The ID of the tool call."""
+
+    function: Optional[ChoiceDeltaToolCallFunction] = None
+
+    type: Optional[Literal["function"]] = None
+    """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceDelta(BaseModel):
+    content: Optional[str] = None
+    """The contents of the chunk message."""
+
+    function_call: Optional[ChoiceDeltaFunctionCall] = None
+    """Deprecated and replaced by `tool_calls`.
+
+    The name and arguments of a function that should be called, as generated by the
+    model.
+    """
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None
+    """The role of the author of this message."""
+
+    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+    """A chat completion delta generated by streamed model responses."""
+
+    finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter", "function_call"]] = None
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, `length` if the maximum number of tokens specified in the request was
+    reached, `content_filter` if content was omitted due to a flag from our content
+    filters, `tool_calls` if the model called a tool, or `function_call`
+    (deprecated) if the model called a function.
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+
+class ChatCompletionChunk(BaseModel):
+    id: str
+    """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can contain more than one element if `n` is greater than 1. Can also be empty
+    for the last chunk if you set `stream_options: {"include_usage": true}`.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created.
+
+    Each chunk has the same timestamp.
+    """
+
+    model: str
+    """The model to generate the completion."""
+
+    object: Literal["chat.completion.chunk"]
+    """The object type, which is always `chat.completion.chunk`."""
+
+    service_tier: Optional[Literal["scale", "default"]] = None
+    """The service tier used for processing the request."""
+
+    system_fingerprint: Optional[str] = None
+    """
+    This fingerprint represents the backend configuration that the model runs with.
+    Can be used in conjunction with the `seed` request parameter to understand when
+    backend changes have been made that might impact determinism.
+    """
+
+    usage: Optional[CompletionUsage] = None
+    """
+    An optional field that will only be present when you set
+    `stream_options: {"include_usage": true}` in your request. When present, it
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py
new file mode 100644
index 00000000..9d407324
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_image_param.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartImageParam", "ImageURL"]
+
+
+class ImageURL(TypedDict, total=False):
+    url: Required[str]
+    """Either a URL of the image or the base64 encoded image data."""
+
+    detail: Literal["auto", "low", "high"]
+    """Specifies the detail level of the image.
+
+    Learn more in the
+    [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
+    """
+
+
+class ChatCompletionContentPartImageParam(TypedDict, total=False):
+    image_url: Required[ImageURL]
+
+    type: Required[Literal["image_url"]]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py
new file mode 100644
index 00000000..0b1b1a80
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"]
+
+
+class InputAudio(TypedDict, total=False):
+    data: Required[str]
+    """Base64 encoded audio data."""
+
+    format: Required[Literal["wav", "mp3"]]
+    """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
+
+
+class ChatCompletionContentPartInputAudioParam(TypedDict, total=False):
+    input_audio: Required[InputAudio]
+
+    type: Required[Literal["input_audio"]]
+    """The type of the content part. Always `input_audio`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py
new file mode 100644
index 00000000..cbedc853
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_param.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam
+from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam
+
+__all__ = ["ChatCompletionContentPartParam", "File", "FileFile"]
+
+
+class FileFile(TypedDict, total=False):
+    file_data: str
+    """
+    The base64 encoded file data, used when passing the file to the model as a
+    string.
+    """
+
+    file_id: str
+    """The ID of an uploaded file to use as input."""
+
+    filename: str
+    """The name of the file, used when passing the file to the model as a string."""
+
+
+class File(TypedDict, total=False):
+    file: Required[FileFile]
+
+    type: Required[Literal["file"]]
+    """The type of the content part. Always `file`."""
+
+
+ChatCompletionContentPartParam: TypeAlias = Union[
+    ChatCompletionContentPartTextParam,
+    ChatCompletionContentPartImageParam,
+    ChatCompletionContentPartInputAudioParam,
+    File,
+]
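These content-part TypedDicts compose into the `content` array of a user message. A sketch of a mixed text-and-image request; the image URL and the model name are placeholders rather than values from this diff:

from openai import OpenAI

client = OpenAI()

response = client.chat.completions.create(
    model="gpt-4o",  # assumed vision-capable model
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is shown in this image?"},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://example.com/photo.jpg", "detail": "low"},
                },
            ],
        }
    ],
)
print(response.choices[0].message.content)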
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py
new file mode 100644
index 00000000..c18c7db7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartRefusalParam"]
+
+
+class ChatCompletionContentPartRefusalParam(TypedDict, total=False):
+    refusal: Required[str]
+    """The refusal message generated by the model."""
+
+    type: Required[Literal["refusal"]]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py
new file mode 100644
index 00000000..a2707444
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_content_part_text_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartTextParam"]
+
+
+class ChatCompletionContentPartTextParam(TypedDict, total=False):
+    text: Required[str]
+    """The text content."""
+
+    type: Required[Literal["text"]]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py
new file mode 100644
index 00000000..0a541cb2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_deleted.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionDeleted"]
+
+
+class ChatCompletionDeleted(BaseModel):
+    id: str
+    """The ID of the chat completion that was deleted."""
+
+    deleted: bool
+    """Whether the chat completion was deleted."""
+
+    object: Literal["chat.completion.deleted"]
+    """The type of object being deleted."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py
new file mode 100644
index 00000000..01e4fdb6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_developer_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionDeveloperMessageParam"]
+
+
+class ChatCompletionDeveloperMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """The contents of the developer message."""
+
+    role: Required[Literal["developer"]]
+    """The role of the messages author, in this case `developer`."""
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py
new file mode 100644
index 00000000..2bc014af
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_call_option_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ChatCompletionFunctionCallOptionParam"]
+
+
+class ChatCompletionFunctionCallOptionParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py
new file mode 100644
index 00000000..5af12bf9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_function_message_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionFunctionMessageParam"]
+
+
+class ChatCompletionFunctionMessageParam(TypedDict, total=False):
+    content: Required[Optional[str]]
+    """The contents of the function message."""
+
+    name: Required[str]
+    """The name of the function to call."""
+
+    role: Required[Literal["function"]]
+    """The role of the messages author, in this case `function`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py
new file mode 100644
index 00000000..c659ac3d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message.py
@@ -0,0 +1,79 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .chat_completion_audio import ChatCompletionAudio
+from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
+
+__all__ = ["ChatCompletionMessage", "Annotation", "AnnotationURLCitation", "FunctionCall"]
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: int
+    """The index of the first character of the URL citation in the message."""
+
+    title: str
+    """The title of the web resource."""
+
+    url: str
+    """The URL of the web resource."""
+
+
+class Annotation(BaseModel):
+    type: Literal["url_citation"]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url_citation: AnnotationURLCitation
+    """A URL citation when using web search."""
+
+
+class FunctionCall(BaseModel):
+    arguments: str
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: str
+    """The name of the function to call."""
+
+
+class ChatCompletionMessage(BaseModel):
+    content: Optional[str] = None
+    """The contents of the message."""
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Literal["assistant"]
+    """The role of the author of this message."""
+
+    annotations: Optional[List[Annotation]] = None
+    """
+    Annotations for the message, when applicable, as when using the
+    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+    """
+
+    audio: Optional[ChatCompletionAudio] = None
+    """
+    If the audio output modality is requested, this object contains data about the
+    audio response from the model.
+    [Learn more](https://platform.openai.com/docs/guides/audio).
+    """
+
+    function_call: Optional[FunctionCall] = None
+    """Deprecated and replaced by `tool_calls`.
+
+    The name and arguments of a function that should be called, as generated by the
+    model.
+    """
+
+    tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
+    """The tool calls generated by the model, such as function calls."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py
new file mode 100644
index 00000000..942da243
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .chat_completion_tool_message_param import ChatCompletionToolMessageParam
+from .chat_completion_user_message_param import ChatCompletionUserMessageParam
+from .chat_completion_system_message_param import ChatCompletionSystemMessageParam
+from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam
+from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
+from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam
+
+__all__ = ["ChatCompletionMessageParam"]
+
+ChatCompletionMessageParam: TypeAlias = Union[
+    ChatCompletionDeveloperMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionUserMessageParam,
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionFunctionMessageParam,
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py
new file mode 100644
index 00000000..4fec6670
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionMessageToolCall", "Function"]
+
+
+class Function(BaseModel):
+    arguments: str
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: str
+    """The name of the function to call."""
+
+
+class ChatCompletionMessageToolCall(BaseModel):
+    id: str
+    """The ID of the tool call."""
+
+    function: Function
+    """The function that the model called."""
+
+    type: Literal["function"]
+    """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py
new file mode 100644
index 00000000..f616c363
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionMessageToolCallParam", "Function"]
+
+
+class Function(TypedDict, total=False):
+    arguments: Required[str]
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Required[str]
+    """The name of the function to call."""
+
+
+class ChatCompletionMessageToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the tool call."""
+
+    function: Required[Function]
+    """The function that the model called."""
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py
new file mode 100644
index 00000000..8e3c1459
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_modality.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatCompletionModality"]
+
+ChatCompletionModality: TypeAlias = Literal["text", "audio"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py
new file mode 100644
index 00000000..369f8b42
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionNamedToolChoiceParam", "Function"]
+
+
+class Function(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
+
+
+class ChatCompletionNamedToolChoiceParam(TypedDict, total=False):
+    function: Required[Function]
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py
new file mode 100644
index 00000000..c44e6e36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_prediction_content_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionPredictionContentParam"]
+
+
+class ChatCompletionPredictionContentParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """
+    The content that should be matched when generating a model response. If
+    generated tokens would match this content, the entire model response can be
+    returned much more quickly.
+    """
+
+    type: Required[Literal["content"]]
+    """The type of the predicted content you want to provide.
+
+    This type is currently always `content`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py
new file mode 100644
index 00000000..e4785c90
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_reasoning_effort.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..shared.reasoning_effort import ReasoningEffort
+
+__all__ = ["ChatCompletionReasoningEffort"]
+
+ChatCompletionReasoningEffort = ReasoningEffort
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py
new file mode 100644
index 00000000..3ec5e9ad
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_role.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatCompletionRole"]
+
+ChatCompletionRole: TypeAlias = Literal["developer", "system", "user", "assistant", "tool", "function"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py
new file mode 100644
index 00000000..95adc08a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_store_message.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .chat_completion_message import ChatCompletionMessage
+
+__all__ = ["ChatCompletionStoreMessage"]
+
+
+class ChatCompletionStoreMessage(ChatCompletionMessage):
+    id: str
+    """The identifier of the chat message."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py
new file mode 100644
index 00000000..471e0eba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_stream_options_param.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ChatCompletionStreamOptionsParam"]
+
+
+class ChatCompletionStreamOptionsParam(TypedDict, total=False):
+    include_usage: bool
+    """If set, an additional chunk will be streamed before the `data: [DONE]` message.
+
+    The `usage` field on this chunk shows the token usage statistics for the entire
+    request, and the `choices` field will always be an empty array.
+
+    All other chunks will also include a `usage` field, but with a null value.
+    **NOTE:** If the stream is interrupted, you may not receive the final usage
+    chunk which contains the total token usage for the request.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py
new file mode 100644
index 00000000..172ccea0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_system_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionSystemMessageParam"]
+
+
+class ChatCompletionSystemMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """The contents of the system message."""
+
+    role: Required[Literal["system"]]
+    """The role of the messages author, in this case `system`."""
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py
new file mode 100644
index 00000000..c69e2589
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_token_logprob.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"]
+
+
+class TopLogprob(BaseModel):
+    token: str
+    """The token."""
+
+    bytes: Optional[List[int]] = None
+    """A list of integers representing the UTF-8 bytes representation of the token.
+
+    Useful in instances where characters are represented by multiple tokens and
+    their byte representations must be combined to generate the correct text
+    representation. Can be `null` if there is no bytes representation for the token.
+    """
+
+    logprob: float
+    """The log probability of this token, if it is within the top 20 most likely
+    tokens.
+
+    Otherwise, the value `-9999.0` is used to signify that the token is very
+    unlikely.
+    """
+
+
+class ChatCompletionTokenLogprob(BaseModel):
+    token: str
+    """The token."""
+
+    bytes: Optional[List[int]] = None
+    """A list of integers representing the UTF-8 bytes representation of the token.
+
+    Useful in instances where characters are represented by multiple tokens and
+    their byte representations must be combined to generate the correct text
+    representation. Can be `null` if there is no bytes representation for the token.
+    """
+
+    logprob: float
+    """The log probability of this token, if it is within the top 20 most likely
+    tokens.
+
+    Otherwise, the value `-9999.0` is used to signify that the token is very
+    unlikely.
+    """
+
+    top_logprobs: List[TopLogprob]
+    """List of the most likely tokens and their log probability, at this token
+    position.
+
+    In rare cases, there may be fewer than the number of requested `top_logprobs`
+    returned.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py
new file mode 100644
index 00000000..7dedf041
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam
+
+__all__ = ["ChatCompletionToolChoiceOptionParam"]
+
+ChatCompletionToolChoiceOptionParam: TypeAlias = Union[
+    Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py
new file mode 100644
index 00000000..eb5e270e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_message_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionToolMessageParam"]
+
+
+class ChatCompletionToolMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """The contents of the tool message."""
+
+    role: Required[Literal["tool"]]
+    """The role of the messages author, in this case `tool`."""
+
+    tool_call_id: Required[str]
+    """Tool call that this message is responding to."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py
new file mode 100644
index 00000000..6c2b1a36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_tool_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.function_definition import FunctionDefinition
+
+__all__ = ["ChatCompletionToolParam"]
+
+
+class ChatCompletionToolParam(TypedDict, total=False):
+    function: Required[FunctionDefinition]
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py
new file mode 100644
index 00000000..5c15322a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/chat_completion_user_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_param import ChatCompletionContentPartParam
+
+__all__ = ["ChatCompletionUserMessageParam"]
+
+
+class ChatCompletionUserMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]]
+    """The contents of the user message."""
+
+    role: Required[Literal["user"]]
+    """The role of the messages author, in this case `user`."""
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py
new file mode 100644
index 00000000..05103fba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_create_params.py
@@ -0,0 +1,404 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared.chat_model import ChatModel
+from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from .chat_completion_tool_param import ChatCompletionToolParam
+from .chat_completion_audio_param import ChatCompletionAudioParam
+from .chat_completion_message_param import ChatCompletionMessageParam
+from ..shared_params.function_parameters import FunctionParameters
+from ..shared_params.response_format_text import ResponseFormatText
+from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from .chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
+from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
+from .chat_completion_function_call_option_param import ChatCompletionFunctionCallOptionParam
+
+__all__ = [
+    "CompletionCreateParamsBase",
+    "FunctionCall",
+    "Function",
+    "ResponseFormat",
+    "WebSearchOptions",
+    "WebSearchOptionsUserLocation",
+    "WebSearchOptionsUserLocationApproximate",
+    "CompletionCreateParamsNonStreaming",
+    "CompletionCreateParamsStreaming",
+]
+
+
+class CompletionCreateParamsBase(TypedDict, total=False):
+    messages: Required[Iterable[ChatCompletionMessageParam]]
+    """A list of messages comprising the conversation so far.
+
+    Depending on the [model](https://platform.openai.com/docs/models) you use,
+    different message types (modalities) are supported, like
+    [text](https://platform.openai.com/docs/guides/text-generation),
+    [images](https://platform.openai.com/docs/guides/vision), and
+    [audio](https://platform.openai.com/docs/guides/audio).
+    """
+
+    model: Required[Union[str, ChatModel]]
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
+    audio: Optional[ChatCompletionAudioParam]
+    """Parameters for audio output.
+
+    Required when audio output is requested with `modalities: ["audio"]`.
+    [Learn more](https://platform.openai.com/docs/guides/audio).
+    """
+
+    frequency_penalty: Optional[float]
+    """Number between -2.0 and 2.0.
+
+    Positive values penalize new tokens based on their existing frequency in the
+    text so far, decreasing the model's likelihood to repeat the same line verbatim.
+    """
+
+    function_call: FunctionCall
+    """Deprecated in favor of `tool_choice`.
+
+    Controls which (if any) function is called by the model.
+
+    `none` means the model will not call a function and instead generates a message.
+
+    `auto` means the model can pick between generating a message or calling a
+    function.
+
+    Specifying a particular function via `{"name": "my_function"}` forces the model
+    to call that function.
+
+    `none` is the default when no functions are present. `auto` is the default if
+    functions are present.
+    """
+
+    functions: Iterable[Function]
+    """Deprecated in favor of `tools`.
+
+    A list of functions the model may generate JSON inputs for.
+    """
+
+    logit_bias: Optional[Dict[str, int]]
+    """Modify the likelihood of specified tokens appearing in the completion.
+
+    Accepts a JSON object that maps tokens (specified by their token ID in the
+    tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+    bias is added to the logits generated by the model prior to sampling. The exact
+    effect will vary per model, but values between -1 and 1 should decrease or
+    increase likelihood of selection; values like -100 or 100 should result in a ban
+    or exclusive selection of the relevant token.
+    """
+
+    logprobs: Optional[bool]
+    """Whether to return log probabilities of the output tokens or not.
+
+    If true, returns the log probabilities of each output token returned in the
+    `content` of `message`.
+    """
+
+    max_completion_tokens: Optional[int]
+    """
+    An upper bound for the number of tokens that can be generated for a completion,
+    including visible output tokens and
+    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    max_tokens: Optional[int]
+    """
+    The maximum number of [tokens](/tokenizer) that can be generated in the chat
+    completion. This value can be used to control
+    [costs](https://openai.com/api/pricing/) for text generated via API.
+
+    This value is now deprecated in favor of `max_completion_tokens`, and is not
+    compatible with
+    [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]]
+    """
+    Output types that you would like the model to generate. Most models are capable
+    of generating text, which is the default:
+
+    `["text"]`
+
+    The `gpt-4o-audio-preview` model can also be used to
+    [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+    this model generate both text and audio responses, you can use:
+
+    `["text", "audio"]`
+    """
+
+    n: Optional[int]
+    """How many chat completion choices to generate for each input message.
+
+    Note that you will be charged based on the number of generated tokens across all
+    of the choices. Keep `n` as `1` to minimize costs.
+    """
+
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+    during tool use.
+    """
+
+    prediction: Optional[ChatCompletionPredictionContentParam]
+    """
+    Static predicted output content, such as the content of a text file that is
+    being regenerated.
+    """
+
+    presence_penalty: Optional[float]
+    """Number between -2.0 and 2.0.
+
+    Positive values penalize new tokens based on whether they appear in the text so
+    far, increasing the model's likelihood to talk about new topics.
+    """
+
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    response_format: ResponseFormat
+    """An object specifying the format that the model must output.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
+
+    seed: Optional[int]
+    """
+    This feature is in Beta. If specified, our system will make a best effort to
+    sample deterministically, such that repeated requests with the same `seed` and
+    parameters should return the same result. Determinism is not guaranteed, and you
+    should refer to the `system_fingerprint` response parameter to monitor changes
+    in the backend.
+    """
+
+    service_tier: Optional[Literal["auto", "default"]]
+    """Specifies the latency tier to use for processing the request.
+
+    This parameter is relevant for customers subscribed to the scale tier service:
+
+    - If set to 'auto', and the Project is Scale tier enabled, the system will
+      utilize scale tier credits until they are exhausted.
+    - If set to 'auto', and the Project is not Scale tier enabled, the request will
+      be processed using the default service tier with a lower uptime SLA and no
+      latency guarantee.
+    - If set to 'default', the request will be processed using the default service
+      tier with a lower uptime SLA and no latency guarantee.
+    - When not set, the default behavior is 'auto'.
+
+    When this parameter is set, the response body will include the `service_tier`
+    utilized.
+    """
+
+    stop: Union[Optional[str], List[str], None]
+    """Up to 4 sequences where the API will stop generating further tokens.
+
+    The returned text will not contain the stop sequence.
+    """
+
+    store: Optional[bool]
+    """
+    Whether or not to store the output of this chat completion request for use in
+    our [model distillation](https://platform.openai.com/docs/guides/distillation)
+    or [evals](https://platform.openai.com/docs/guides/evals) products.
+    """
+
+    stream_options: Optional[ChatCompletionStreamOptionsParam]
+    """Options for streaming response. Only set this when you set `stream: true`."""
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. We generally recommend altering
+    this or `top_p` but not both.
+    """
+
+    tool_choice: ChatCompletionToolChoiceOptionParam
+    """
+    Controls which (if any) tool is called by the model. `none` means the model will
+    not call any tool and instead generates a message. `auto` means the model can
+    pick between generating a message or calling one or more tools. `required` means
+    the model must call one or more tools. Specifying a particular tool via
+    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+    call that tool.
+
+    `none` is the default when no tools are present. `auto` is the default if tools
+    are present.
+    """
+
+    tools: Iterable[ChatCompletionToolParam]
+    """A list of tools the model may call.
+
+    Currently, only functions are supported as a tool. Use this to provide a list of
+    functions the model may generate JSON inputs for. A maximum of 128 functions is
+    supported.
+    """
+
+    top_logprobs: Optional[int]
+    """
+    An integer between 0 and 20 specifying the number of most likely tokens to
+    return at each token position, each with an associated log probability.
+    `logprobs` must be set to `true` if this parameter is used.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or `temperature` but not both.
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
+
+    web_search_options: WebSearchOptions
+    """
+    This tool searches the web for relevant results to use in a response. Learn more
+    about the
+    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+    """
+
+
+FunctionCall: TypeAlias = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam]
+
+
+class Function(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to be called.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    description: str
+    """
+    A description of what the function does, used by the model to choose when and
+    how to call the function.
+    """
+
+    parameters: FunctionParameters
+    """The parameters the functions accepts, described as a JSON Schema object.
+
+    See the [guide](https://platform.openai.com/docs/guides/function-calling) for
+    examples, and the
+    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+    documentation about the format.
+
+    Omitting `parameters` defines a function with an empty parameter list.
+    """
+
+
+ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
+
+
+class WebSearchOptionsUserLocationApproximate(TypedDict, total=False):
+    city: str
+    """Free text input for the city of the user, e.g. `San Francisco`."""
+
+    country: str
+    """
+    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+    the user, e.g. `US`.
+    """
+
+    region: str
+    """Free text input for the region of the user, e.g. `California`."""
+
+    timezone: str
+    """
+    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+    user, e.g. `America/Los_Angeles`.
+    """
+
+
+class WebSearchOptionsUserLocation(TypedDict, total=False):
+    approximate: Required[WebSearchOptionsUserLocationApproximate]
+    """Approximate location parameters for the search."""
+
+    type: Required[Literal["approximate"]]
+    """The type of location approximation. Always `approximate`."""
+
+
+class WebSearchOptions(TypedDict, total=False):
+    search_context_size: Literal["low", "medium", "high"]
+    """
+    High level guidance for the amount of context window space to use for the
+    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """
+
+    user_location: Optional[WebSearchOptionsUserLocation]
+    """Approximate location parameters for the search."""
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+    for more information, along with the
+    [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+    guide for more information on how to handle the streaming events.
+    """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+    for more information, along with the
+    [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+    guide for more information on how to handle the streaming events.
+    """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
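`completion_create_params` is the request-side mirror of `client.chat.completions.create`. As one concrete instance of the `response_format` field documented above, a sketch of the `json_schema` variant; the schema, its name, and the model are illustrative:

import json

from openai import OpenAI

client = OpenAI()

response = client.chat.completions.create(
    model="gpt-4o-mini",  # assumed model with Structured Outputs support
    messages=[{"role": "user", "content": "Extract the city and country from: 'Oslo, Norway'."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "place",  # hypothetical schema name
            "strict": True,
            "schema": {
                "type": "object",
                "properties": {"city": {"type": "string"}, "country": {"type": "string"}},
                "required": ["city", "country"],
                "additionalProperties": False,
            },
        },
    },
)
print(json.loads(response.choices[0].message.content))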
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py
new file mode 100644
index 00000000..d93da834
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_list_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = ["CompletionListParams"]
+
+
+class CompletionListParams(TypedDict, total=False):
+    after: str
+    """Identifier for the last chat completion from the previous pagination request."""
+
+    limit: int
+    """Number of Chat Completions to retrieve."""
+
+    metadata: Optional[Metadata]
+    """A list of metadata keys to filter the Chat Completions by. Example:
+
+    `metadata[key1]=value1&metadata[key2]=value2`
+    """
+
+    model: str
+    """The model used to generate the Chat Completions."""
+
+    order: Literal["asc", "desc"]
+    """Sort order for Chat Completions by timestamp.
+
+    Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py
new file mode 100644
index 00000000..fc71733f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completion_update_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = ["CompletionUpdateParams"]
+
+
+class CompletionUpdateParams(TypedDict, total=False):
+    metadata: Required[Optional[Metadata]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py
new file mode 100644
index 00000000..b8e62d6a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/__init__.py
@@ -0,0 +1,5 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .message_list_params import MessageListParams as MessageListParams
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py
new file mode 100644
index 00000000..4e694e83
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/completions/message_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["MessageListParams"]
+
+
+class MessageListParams(TypedDict, total=False):
+    after: str
+    """Identifier for the last message from the previous pagination request."""
+
+    limit: int
+    """Number of messages to retrieve."""
+
+    order: Literal["asc", "desc"]
+    """Sort order for messages by timestamp.
+
+    Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py
new file mode 100644
index 00000000..4b11dac5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_chat_completion.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Generic, TypeVar, Optional
+
+from ..._models import GenericModel
+from .chat_completion import Choice, ChatCompletion
+from .chat_completion_message import ChatCompletionMessage
+from .parsed_function_tool_call import ParsedFunctionToolCall
+
+__all__ = ["ParsedChatCompletion", "ParsedChoice"]
+
+
+ContentType = TypeVar("ContentType")
+
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedChatCompletionMessage(ChatCompletionMessage, GenericModel, Generic[ContentType]):
+    parsed: Optional[ContentType] = None
+    """The auto-parsed message contents"""
+
+    tool_calls: Optional[List[ParsedFunctionToolCall]] = None  # type: ignore[assignment]
+    """The tool calls generated by the model, such as function calls."""
+
+
+class ParsedChoice(Choice, GenericModel, Generic[ContentType]):
+    message: ParsedChatCompletionMessage[ContentType]
+    """A chat completion message generated by the model."""
+
+
+class ParsedChatCompletion(ChatCompletion, GenericModel, Generic[ContentType]):
+    choices: List[ParsedChoice[ContentType]]  # type: ignore[assignment]
+    """A list of chat completion choices.
+
+    Can be more than one if `n` is greater than 1.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py
new file mode 100644
index 00000000..3e90789f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat/parsed_function_tool_call.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall
+
+__all__ = ["ParsedFunctionToolCall", "ParsedFunction"]
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedFunction(Function):
+    parsed_arguments: Optional[object] = None
+    """
+    The arguments to call the function with.
+
+    If you used `openai.pydantic_function_tool()` then this will be an
+    instance of the given `BaseModel`.
+
+    Otherwise, this will be the parsed JSON arguments.
+    """
+
+
+class ParsedFunctionToolCall(ChatCompletionMessageToolCall):
+    function: ParsedFunction
+    """The function that the model called."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/chat_model.py b/.venv/lib/python3.12/site-packages/openai/types/chat_model.py
new file mode 100644
index 00000000..9304d195
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/chat_model.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .shared import chat_model
+
+__all__ = ["ChatModel"]
+
+ChatModel = chat_model.ChatModel
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion.py b/.venv/lib/python3.12/site-packages/openai/types/completion.py
new file mode 100644
index 00000000..d3b3102a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .completion_usage import CompletionUsage
+from .completion_choice import CompletionChoice
+
+__all__ = ["Completion"]
+
+
+class Completion(BaseModel):
+    id: str
+    """A unique identifier for the completion."""
+
+    choices: List[CompletionChoice]
+    """The list of completion choices the model generated for the input prompt."""
+
+    created: int
+    """The Unix timestamp (in seconds) of when the completion was created."""
+
+    model: str
+    """The model used for completion."""
+
+    object: Literal["text_completion"]
+    """The object type, which is always "text_completion" """
+
+    system_fingerprint: Optional[str] = None
+    """This fingerprint represents the backend configuration that the model runs with.
+
+    Can be used in conjunction with the `seed` request parameter to understand when
+    backend changes have been made that might impact determinism.
+    """
+
+    usage: Optional[CompletionUsage] = None
+    """Usage statistics for the completion request."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion_choice.py b/.venv/lib/python3.12/site-packages/openai/types/completion_choice.py
new file mode 100644
index 00000000..d948ebc9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion_choice.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["CompletionChoice", "Logprobs"]
+
+
+class Logprobs(BaseModel):
+    text_offset: Optional[List[int]] = None
+
+    token_logprobs: Optional[List[float]] = None
+
+    tokens: Optional[List[str]] = None
+
+    top_logprobs: Optional[List[Dict[str, float]]] = None
+
+
+class CompletionChoice(BaseModel):
+    finish_reason: Literal["stop", "length", "content_filter"]
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, `length` if the maximum number of tokens specified in the request was
+    reached, or `content_filter` if content was omitted due to a flag from our
+    content filters.
+    """
+
+    index: int
+
+    logprobs: Optional[Logprobs] = None
+
+    text: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/completion_create_params.py
new file mode 100644
index 00000000..fdb1680d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion_create_params.py
@@ -0,0 +1,187 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+
+__all__ = ["CompletionCreateParamsBase", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming"]
+
+
+class CompletionCreateParamsBase(TypedDict, total=False):
+    model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
+    """ID of the model to use.
+
+    You can use the
+    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+    see all of your available models, or see our
+    [Model overview](https://platform.openai.com/docs/models) for descriptions of
+    them.
+    """
+
+    prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]]
+    """
+    The prompt(s) to generate completions for, encoded as a string, array of
+    strings, array of tokens, or array of token arrays.
+
+    Note that <|endoftext|> is the document separator that the model sees during
+    training, so if a prompt is not specified the model will generate as if from the
+    beginning of a new document.
+    """
+
+    best_of: Optional[int]
+    """
+    Generates `best_of` completions server-side and returns the "best" (the one with
+    the highest log probability per token). Results cannot be streamed.
+
+    When used with `n`, `best_of` controls the number of candidate completions and
+    `n` specifies how many to return – `best_of` must be greater than `n`.
+
+    **Note:** Because this parameter generates many completions, it can quickly
+    consume your token quota. Use carefully and ensure that you have reasonable
+    settings for `max_tokens` and `stop`.
+    """
+
+    echo: Optional[bool]
+    """Echo back the prompt in addition to the completion"""
+
+    frequency_penalty: Optional[float]
+    """Number between -2.0 and 2.0.
+
+    Positive values penalize new tokens based on their existing frequency in the
+    text so far, decreasing the model's likelihood to repeat the same line verbatim.
+
+    [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+    """
+
+    logit_bias: Optional[Dict[str, int]]
+    """Modify the likelihood of specified tokens appearing in the completion.
+
+    Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+    tokenizer) to an associated bias value from -100 to 100. You can use this
+    [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+    Mathematically, the bias is added to the logits generated by the model prior to
+    sampling. The exact effect will vary per model, but values between -1 and 1
+    should decrease or increase likelihood of selection; values like -100 or 100
+    should result in a ban or exclusive selection of the relevant token.
+
+    As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+    from being generated.
+    """
+
+    logprobs: Optional[int]
+    """
+    Include the log probabilities on the `logprobs` most likely output tokens, as
+    well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+    list of the 5 most likely tokens. The API will always return the `logprob` of
+    the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+    The maximum value for `logprobs` is 5.
+    """
+
+    max_tokens: Optional[int]
+    """
+    The maximum number of [tokens](/tokenizer) that can be generated in the
+    completion.
+
+    The token count of your prompt plus `max_tokens` cannot exceed the model's
+    context length.
+    [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+    for counting tokens.
+    """
+
+    n: Optional[int]
+    """How many completions to generate for each prompt.
+
+    **Note:** Because this parameter generates many completions, it can quickly
+    consume your token quota. Use carefully and ensure that you have reasonable
+    settings for `max_tokens` and `stop`.
+    """
+
+    presence_penalty: Optional[float]
+    """Number between -2.0 and 2.0.
+
+    Positive values penalize new tokens based on whether they appear in the text so
+    far, increasing the model's likelihood to talk about new topics.
+
+    [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+    """
+
+    seed: Optional[int]
+    """
+    If specified, our system will make a best effort to sample deterministically,
+    such that repeated requests with the same `seed` and parameters should return
+    the same result.
+
+    Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+    response parameter to monitor changes in the backend.
+    """
+
+    stop: Union[Optional[str], List[str], None]
+    """Up to 4 sequences where the API will stop generating further tokens.
+
+    The returned text will not contain the stop sequence.
+    """
+
+    stream_options: Optional[ChatCompletionStreamOptionsParam]
+    """Options for streaming response. Only set this when you set `stream: true`."""
+
+    suffix: Optional[str]
+    """The suffix that comes after a completion of inserted text.
+
+    This parameter is only supported for `gpt-3.5-turbo-instruct`.
+    """
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+
+    We generally recommend altering this or `top_p` but not both.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or `temperature` but not both.
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """Whether to stream back partial progress.
+
+    If set, tokens will be sent as data-only
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+    as they become available, with the stream terminated by a `data: [DONE]`
+    message.
+    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+    """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+    stream: Required[Literal[True]]
+    """Whether to stream back partial progress.
+
+    If set, tokens will be sent as data-only
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+    as they become available, with the stream terminated by a `data: [DONE]`
+    message.
+    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+    """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
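The streaming variant of these params changes the return type: with `stream=True` the client yields `Completion` chunks rather than a single object. A minimal sketch, assuming an `OPENAI_API_KEY` is configured:

# Minimal sketch: CompletionCreateParamsStreaming in practice.
from openai import OpenAI

client = OpenAI()
stream = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Write one short sentence about the sea.",
    max_tokens=32,
    stream=True,   # selects the CompletionCreateParamsStreaming shape
)
for chunk in stream:
    print(chunk.choices[0].text, end="", flush=True)
print()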
diff --git a/.venv/lib/python3.12/site-packages/openai/types/completion_usage.py b/.venv/lib/python3.12/site-packages/openai/types/completion_usage.py
new file mode 100644
index 00000000..d8c4e84c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/completion_usage.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"]
+
+
+class CompletionTokensDetails(BaseModel):
+    accepted_prediction_tokens: Optional[int] = None
+    """
+    When using Predicted Outputs, the number of tokens in the prediction that
+    appeared in the completion.
+    """
+
+    audio_tokens: Optional[int] = None
+    """Audio input tokens generated by the model."""
+
+    reasoning_tokens: Optional[int] = None
+    """Tokens generated by the model for reasoning."""
+
+    rejected_prediction_tokens: Optional[int] = None
+    """
+    When using Predicted Outputs, the number of tokens in the prediction that did
+    not appear in the completion. However, like reasoning tokens, these tokens are
+    still counted in the total completion tokens for purposes of billing, output,
+    and context window limits.
+    """
+
+
+class PromptTokensDetails(BaseModel):
+    audio_tokens: Optional[int] = None
+    """Audio input tokens present in the prompt."""
+
+    cached_tokens: Optional[int] = None
+    """Cached tokens present in the prompt."""
+
+
+class CompletionUsage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the generated completion."""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt."""
+
+    total_tokens: int
+    """Total number of tokens used in the request (prompt + completion)."""
+
+    completion_tokens_details: Optional[CompletionTokensDetails] = None
+    """Breakdown of tokens used in a completion."""
+
+    prompt_tokens_details: Optional[PromptTokensDetails] = None
+    """Breakdown of tokens used in the prompt."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py b/.venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py
new file mode 100644
index 00000000..eff247a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/create_embedding_response.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .embedding import Embedding
+
+__all__ = ["CreateEmbeddingResponse", "Usage"]
+
+
+class Usage(BaseModel):
+    prompt_tokens: int
+    """The number of tokens used by the prompt."""
+
+    total_tokens: int
+    """The total number of tokens used by the request."""
+
+
+class CreateEmbeddingResponse(BaseModel):
+    data: List[Embedding]
+    """The list of embeddings generated by the model."""
+
+    model: str
+    """The name of the model used to generate the embedding."""
+
+    object: Literal["list"]
+    """The object type, which is always "list"."""
+
+    usage: Usage
+    """The usage information for the request."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/embedding.py b/.venv/lib/python3.12/site-packages/openai/types/embedding.py
new file mode 100644
index 00000000..769b1d16
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/embedding.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Embedding"]
+
+
+class Embedding(BaseModel):
+    embedding: List[float]
+    """The embedding vector, which is a list of floats.
+
+    The length of the vector depends on the model as listed in the
+    [embedding guide](https://platform.openai.com/docs/guides/embeddings).
+    """
+
+    index: int
+    """The index of the embedding in the list of embeddings."""
+
+    object: Literal["embedding"]
+    """The object type, which is always "embedding"."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py
new file mode 100644
index 00000000..a9056644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/embedding_create_params.py
@@ -0,0 +1,53 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .embedding_model import EmbeddingModel
+
+__all__ = ["EmbeddingCreateParams"]
+
+
+class EmbeddingCreateParams(TypedDict, total=False):
+    input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]]
+    """Input text to embed, encoded as a string or array of tokens.
+
+    To embed multiple inputs in a single request, pass an array of strings or array
+    of token arrays. The input must not exceed the max input tokens for the model
+    (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any
+    array must be 2048 dimensions or less.
+    [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+    for counting tokens. Some models may also impose a limit on the total number of
+    tokens summed across inputs.
+    """
+
+    model: Required[Union[str, EmbeddingModel]]
+    """ID of the model to use.
+
+    You can use the
+    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+    see all of your available models, or see our
+    [Model overview](https://platform.openai.com/docs/models) for descriptions of
+    them.
+    """
+
+    dimensions: int
+    """The number of dimensions the resulting output embeddings should have.
+
+    Only supported in `text-embedding-3` and later models.
+    """
+
+    encoding_format: Literal["float", "base64"]
+    """The format to return the embeddings in.
+
+    Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/embedding_model.py b/.venv/lib/python3.12/site-packages/openai/types/embedding_model.py
new file mode 100644
index 00000000..075ff976
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/embedding_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["EmbeddingModel"]
+
+EmbeddingModel: TypeAlias = Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py
new file mode 100644
index 00000000..ee96bd78
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject
+from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject
+
+__all__ = ["FileChunkingStrategy"]
+
+FileChunkingStrategy: TypeAlias = Annotated[
+    Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py
new file mode 100644
index 00000000..25d94286
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_chunking_strategy_param.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam
+from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam
+
+__all__ = ["FileChunkingStrategyParam"]
+
+FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_content.py b/.venv/lib/python3.12/site-packages/openai/types/file_content.py
new file mode 100644
index 00000000..d89eee62
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_content.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import TypeAlias
+
+__all__ = ["FileContent"]
+
+FileContent: TypeAlias = str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/file_create_params.py
new file mode 100644
index 00000000..728dfd35
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_create_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .._types import FileTypes
+from .file_purpose import FilePurpose
+
+__all__ = ["FileCreateParams"]
+
+
+class FileCreateParams(TypedDict, total=False):
+    file: Required[FileTypes]
+    """The File object (not file name) to be uploaded."""
+
+    purpose: Required[FilePurpose]
+    """The intended purpose of the uploaded file.
+
+    One of: `assistants` (used in the Assistants API), `batch` (used in the
+    Batch API), `fine-tune` (used for fine-tuning), `vision` (images used for
+    vision fine-tuning), `user_data` (flexible file type for any purpose), or
+    `evals` (used for eval data sets).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/file_deleted.py
new file mode 100644
index 00000000..f25fa87a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileDeleted"]
+
+
+class FileDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["file"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/file_list_params.py
new file mode 100644
index 00000000..058d874c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_list_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["FileListParams"]
+
+
+class FileListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 10,000, and the default is 10,000.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
+
+    purpose: str
+    """Only return files with the given purpose."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_object.py b/.venv/lib/python3.12/site-packages/openai/types/file_object.py
new file mode 100644
index 00000000..1d65e698
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_object.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileObject"]
+
+
+class FileObject(BaseModel):
+    id: str
+    """The file identifier, which can be referenced in the API endpoints."""
+
+    bytes: int
+    """The size of the file, in bytes."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the file was created."""
+
+    filename: str
+    """The name of the file."""
+
+    object: Literal["file"]
+    """The object type, which is always `file`."""
+
+    purpose: Literal[
+        "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision"
+    ]
+    """The intended purpose of the file.
+
+    Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`,
+    `fine-tune`, `fine-tune-results` and `vision`.
+    """
+
+    status: Literal["uploaded", "processed", "error"]
+    """Deprecated.
+
+    The current status of the file, which can be either `uploaded`, `processed`, or
+    `error`.
+    """
+
+    expires_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the file will expire."""
+
+    status_details: Optional[str] = None
+    """Deprecated.
+
+    For details on why a fine-tuning training file failed validation, see the
+    `error` field on `fine_tuning.job`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/file_purpose.py b/.venv/lib/python3.12/site-packages/openai/types/file_purpose.py
new file mode 100644
index 00000000..b2c2d5f9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/file_purpose.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["FilePurpose"]
+
+FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py
new file mode 100644
index 00000000..92b81329
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .fine_tuning_job import FineTuningJob as FineTuningJob
+from .job_list_params import JobListParams as JobListParams
+from .job_create_params import JobCreateParams as JobCreateParams
+from .fine_tuning_job_event import FineTuningJobEvent as FineTuningJobEvent
+from .job_list_events_params import JobListEventsParams as JobListEventsParams
+from .fine_tuning_job_integration import FineTuningJobIntegration as FineTuningJobIntegration
+from .fine_tuning_job_wandb_integration import FineTuningJobWandbIntegration as FineTuningJobWandbIntegration
+from .fine_tuning_job_wandb_integration_object import (
+    FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject,
+)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py
new file mode 100644
index 00000000..c7fff2b7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job.py
@@ -0,0 +1,223 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.metadata import Metadata
+from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject
+
+__all__ = [
+    "FineTuningJob",
+    "Error",
+    "Hyperparameters",
+    "Method",
+    "MethodDpo",
+    "MethodDpoHyperparameters",
+    "MethodSupervised",
+    "MethodSupervisedHyperparameters",
+]
+
+
+class Error(BaseModel):
+    code: str
+    """A machine-readable error code."""
+
+    message: str
+    """A human-readable error message."""
+
+    param: Optional[str] = None
+    """The parameter that was invalid, usually `training_file` or `validation_file`.
+
+    This field will be null if the failure was not parameter-specific.
+    """
+
+
+class Hyperparameters(BaseModel):
+    batch_size: Union[Literal["auto"], int, None] = None
+    """Number of examples in each batch.
+
+    A larger batch size means that model parameters are updated less frequently, but
+    with lower variance.
+    """
+
+    learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+    """Scaling factor for the learning rate.
+
+    A smaller learning rate may be useful to avoid overfitting.
+    """
+
+    n_epochs: Union[Literal["auto"], int, None] = None
+    """The number of epochs to train the model for.
+
+    An epoch refers to one full cycle through the training dataset.
+    """
+
+
+class MethodDpoHyperparameters(BaseModel):
+    batch_size: Union[Literal["auto"], int, None] = None
+    """Number of examples in each batch.
+
+    A larger batch size means that model parameters are updated less frequently, but
+    with lower variance.
+    """
+
+    beta: Union[Literal["auto"], float, None] = None
+    """The beta value for the DPO method.
+
+    A higher beta value will increase the weight of the penalty between the policy
+    and reference model.
+    """
+
+    learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+    """Scaling factor for the learning rate.
+
+    A smaller learning rate may be useful to avoid overfitting.
+    """
+
+    n_epochs: Union[Literal["auto"], int, None] = None
+    """The number of epochs to train the model for.
+
+    An epoch refers to one full cycle through the training dataset.
+    """
+
+
+class MethodDpo(BaseModel):
+    hyperparameters: Optional[MethodDpoHyperparameters] = None
+    """The hyperparameters used for the fine-tuning job."""
+
+
+class MethodSupervisedHyperparameters(BaseModel):
+    batch_size: Union[Literal["auto"], int, None] = None
+    """Number of examples in each batch.
+
+    A larger batch size means that model parameters are updated less frequently, but
+    with lower variance.
+    """
+
+    learning_rate_multiplier: Union[Literal["auto"], float, None] = None
+    """Scaling factor for the learning rate.
+
+    A smaller learning rate may be useful to avoid overfitting.
+    """
+
+    n_epochs: Union[Literal["auto"], int, None] = None
+    """The number of epochs to train the model for.
+
+    An epoch refers to one full cycle through the training dataset.
+    """
+
+
+class MethodSupervised(BaseModel):
+    hyperparameters: Optional[MethodSupervisedHyperparameters] = None
+    """The hyperparameters used for the fine-tuning job."""
+
+
+class Method(BaseModel):
+    dpo: Optional[MethodDpo] = None
+    """Configuration for the DPO fine-tuning method."""
+
+    supervised: Optional[MethodSupervised] = None
+    """Configuration for the supervised fine-tuning method."""
+
+    type: Optional[Literal["supervised", "dpo"]] = None
+    """The type of method. Is either `supervised` or `dpo`."""
+
+
+class FineTuningJob(BaseModel):
+    id: str
+    """The object identifier, which can be referenced in the API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
+
+    error: Optional[Error] = None
+    """
+    For fine-tuning jobs that have `failed`, this will contain more information on
+    the cause of the failure.
+    """
+
+    fine_tuned_model: Optional[str] = None
+    """The name of the fine-tuned model that is being created.
+
+    The value will be null if the fine-tuning job is still running.
+    """
+
+    finished_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the fine-tuning job was finished.
+
+    The value will be null if the fine-tuning job is still running.
+    """
+
+    hyperparameters: Hyperparameters
+    """The hyperparameters used for the fine-tuning job.
+
+    This value will only be returned when running `supervised` jobs.
+    """
+
+    model: str
+    """The base model that is being fine-tuned."""
+
+    object: Literal["fine_tuning.job"]
+    """The object type, which is always "fine_tuning.job"."""
+
+    organization_id: str
+    """The organization that owns the fine-tuning job."""
+
+    result_files: List[str]
+    """The compiled results file ID(s) for the fine-tuning job.
+
+    You can retrieve the results with the
+    [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+    """
+
+    seed: int
+    """The seed used for the fine-tuning job."""
+
+    status: Literal["validating_files", "queued", "running", "succeeded", "failed", "cancelled"]
+    """
+    The current status of the fine-tuning job, which can be either
+    `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
+    """
+
+    trained_tokens: Optional[int] = None
+    """The total number of billable tokens processed by this fine-tuning job.
+
+    The value will be null if the fine-tuning job is still running.
+    """
+
+    training_file: str
+    """The file ID used for training.
+
+    You can retrieve the training data with the
+    [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+    """
+
+    validation_file: Optional[str] = None
+    """The file ID used for validation.
+
+    You can retrieve the validation results with the
+    [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
+    """
+
+    estimated_finish: Optional[int] = None
+    """
+    The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
+    finish. The value will be null if the fine-tuning job is not running.
+    """
+
+    integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None
+    """A list of integrations to enable for this fine-tuning job."""
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    method: Optional[Method] = None
+    """The method used for fine-tuning."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py
new file mode 100644
index 00000000..1d728bd7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import builtins
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FineTuningJobEvent"]
+
+
+class FineTuningJobEvent(BaseModel):
+    id: str
+    """The object identifier."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
+
+    level: Literal["info", "warn", "error"]
+    """The log level of the event."""
+
+    message: str
+    """The message of the event."""
+
+    object: Literal["fine_tuning.job.event"]
+    """The object type, which is always "fine_tuning.job.event"."""
+
+    data: Optional[builtins.object] = None
+    """The data associated with the event."""
+
+    type: Optional[Literal["message", "metrics"]] = None
+    """The type of event."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py
new file mode 100644
index 00000000..9a66aa4f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_integration.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject
+
+FineTuningJobIntegration = FineTuningJobWandbIntegrationObject
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py
new file mode 100644
index 00000000..4ac282eb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FineTuningJobWandbIntegration"]
+
+
+class FineTuningJobWandbIntegration(BaseModel):
+    project: str
+    """The name of the project that the new run will be created under."""
+
+    entity: Optional[str] = None
+    """The entity to use for the run.
+
+    This allows you to set the team or username of the WandB user that you would
+    like associated with the run. If not set, the default entity for the registered
+    WandB API key is used.
+    """
+
+    name: Optional[str] = None
+    """A display name to set for the run.
+
+    If not set, we will use the Job ID as the name.
+    """
+
+    tags: Optional[List[str]] = None
+    """A list of tags to be attached to the newly created run.
+
+    These tags are passed through directly to WandB. Some default tags are generated
+    by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py
new file mode 100644
index 00000000..5b94354d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .fine_tuning_job_wandb_integration import FineTuningJobWandbIntegration
+
+__all__ = ["FineTuningJobWandbIntegrationObject"]
+
+
+class FineTuningJobWandbIntegrationObject(BaseModel):
+    type: Literal["wandb"]
+    """The type of the integration being enabled for the fine-tuning job"""
+
+    wandb: FineTuningJobWandbIntegration
+    """The settings for your integration with Weights and Biases.
+
+    This payload specifies the project that metrics will be sent to. Optionally, you
+    can set an explicit display name for your run, add tags to your run, and set a
+    default entity (team, username, etc) to be associated with your run.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py
new file mode 100644
index 00000000..f4cf980b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_create_params.py
@@ -0,0 +1,236 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.metadata import Metadata
+
+__all__ = [
+    "JobCreateParams",
+    "Hyperparameters",
+    "Integration",
+    "IntegrationWandb",
+    "Method",
+    "MethodDpo",
+    "MethodDpoHyperparameters",
+    "MethodSupervised",
+    "MethodSupervisedHyperparameters",
+]
+
+
+class JobCreateParams(TypedDict, total=False):
+    model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]]
+    """The name of the model to fine-tune.
+
+    You can select one of the
+    [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
+    """
+
+    training_file: Required[str]
+    """The ID of an uploaded file that contains training data.
+
+    See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+    for how to upload a file.
+
+    Your dataset must be formatted as a JSONL file. Additionally, you must upload
+    your file with the purpose `fine-tune`.
+
+    The contents of the file should differ depending on if the model uses the
+    [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
+    [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+    format, or if the fine-tuning method uses the
+    [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
+    format.
+
+    See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+    for more details.
+    """
+
+    hyperparameters: Hyperparameters
+    """
+    The hyperparameters used for the fine-tuning job. This value is now deprecated
+    in favor of `method`, and should be passed in under the `method` parameter.
+    """
+
+    integrations: Optional[Iterable[Integration]]
+    """A list of integrations to enable for your fine-tuning job."""
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    method: Method
+    """The method used for fine-tuning."""
+
+    seed: Optional[int]
+    """The seed controls the reproducibility of the job.
+
+    Passing in the same seed and job parameters should produce the same results, but
+    may differ in rare cases. If a seed is not specified, one will be generated for
+    you.
+    """
+
+    suffix: Optional[str]
+    """
+    A string of up to 64 characters that will be added to your fine-tuned model
+    name.
+
+    For example, a `suffix` of "custom-model-name" would produce a model name like
+    `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
+    """
+
+    validation_file: Optional[str]
+    """The ID of an uploaded file that contains validation data.
+
+    If you provide this file, the data is used to generate validation metrics
+    periodically during fine-tuning. These metrics can be viewed in the fine-tuning
+    results file. The same data should not be present in both train and validation
+    files.
+
+    Your dataset must be formatted as a JSONL file. You must upload your file with
+    the purpose `fine-tune`.
+
+    See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+    for more details.
+    """
+
+
+class Hyperparameters(TypedDict, total=False):
+    batch_size: Union[Literal["auto"], int]
+    """Number of examples in each batch.
+
+    A larger batch size means that model parameters are updated less frequently, but
+    with lower variance.
+    """
+
+    learning_rate_multiplier: Union[Literal["auto"], float]
+    """Scaling factor for the learning rate.
+
+    A smaller learning rate may be useful to avoid overfitting.
+    """
+
+    n_epochs: Union[Literal["auto"], int]
+    """The number of epochs to train the model for.
+
+    An epoch refers to one full cycle through the training dataset.
+    """
+
+
+class IntegrationWandb(TypedDict, total=False):
+    project: Required[str]
+    """The name of the project that the new run will be created under."""
+
+    entity: Optional[str]
+    """The entity to use for the run.
+
+    This allows you to set the team or username of the WandB user that you would
+    like associated with the run. If not set, the default entity for the registered
+    WandB API key is used.
+    """
+
+    name: Optional[str]
+    """A display name to set for the run.
+
+    If not set, we will use the Job ID as the name.
+    """
+
+    tags: List[str]
+    """A list of tags to be attached to the newly created run.
+
+    These tags are passed through directly to WandB. Some default tags are generated
+    by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
+    """
+
+
+class Integration(TypedDict, total=False):
+    type: Required[Literal["wandb"]]
+    """The type of integration to enable.
+
+    Currently, only "wandb" (Weights and Biases) is supported.
+    """
+
+    wandb: Required[IntegrationWandb]
+    """The settings for your integration with Weights and Biases.
+
+    This payload specifies the project that metrics will be sent to. Optionally, you
+    can set an explicit display name for your run, add tags to your run, and set a
+    default entity (team, username, etc) to be associated with your run.
+    """
+
+
+class MethodDpoHyperparameters(TypedDict, total=False):
+    batch_size: Union[Literal["auto"], int]
+    """Number of examples in each batch.
+
+    A larger batch size means that model parameters are updated less frequently, but
+    with lower variance.
+    """
+
+    beta: Union[Literal["auto"], float]
+    """The beta value for the DPO method.
+
+    A higher beta value will increase the weight of the penalty between the policy
+    and reference model.
+    """
+
+    learning_rate_multiplier: Union[Literal["auto"], float]
+    """Scaling factor for the learning rate.
+
+    A smaller learning rate may be useful to avoid overfitting.
+    """
+
+    n_epochs: Union[Literal["auto"], int]
+    """The number of epochs to train the model for.
+
+    An epoch refers to one full cycle through the training dataset.
+    """
+
+
+class MethodDpo(TypedDict, total=False):
+    hyperparameters: MethodDpoHyperparameters
+    """The hyperparameters used for the fine-tuning job."""
+
+
+class MethodSupervisedHyperparameters(TypedDict, total=False):
+    batch_size: Union[Literal["auto"], int]
+    """Number of examples in each batch.
+
+    A larger batch size means that model parameters are updated less frequently, but
+    with lower variance.
+    """
+
+    learning_rate_multiplier: Union[Literal["auto"], float]
+    """Scaling factor for the learning rate.
+
+    A smaller learning rate may be useful to avoid overfitting.
+    """
+
+    n_epochs: Union[Literal["auto"], int]
+    """The number of epochs to train the model for.
+
+    An epoch refers to one full cycle through the training dataset.
+    """
+
+
+class MethodSupervised(TypedDict, total=False):
+    hyperparameters: MethodSupervisedHyperparameters
+    """The hyperparameters used for the fine-tuning job."""
+
+
+class Method(TypedDict, total=False):
+    dpo: MethodDpo
+    """Configuration for the DPO fine-tuning method."""
+
+    supervised: MethodSupervised
+    """Configuration for the supervised fine-tuning method."""
+
+    type: Literal["supervised", "dpo"]
+    """The type of method. Is either `supervised` or `dpo`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py
new file mode 100644
index 00000000..e1c9a64d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_events_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["JobListEventsParams"]
+
+
+class JobListEventsParams(TypedDict, total=False):
+    after: str
+    """Identifier for the last event from the previous pagination request."""
+
+    limit: int
+    """Number of events to retrieve."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py
new file mode 100644
index 00000000..b79f3ce8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/job_list_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import TypedDict
+
+__all__ = ["JobListParams"]
+
+
+class JobListParams(TypedDict, total=False):
+    after: str
+    """Identifier for the last job from the previous pagination request."""
+
+    limit: int
+    """Number of fine-tuning jobs to retrieve."""
+
+    metadata: Optional[Dict[str, str]]
+    """Optional metadata filter.
+
+    To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to
+    indicate no metadata.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py
new file mode 100644
index 00000000..6c93da1b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .checkpoint_list_params import CheckpointListParams as CheckpointListParams
+from .fine_tuning_job_checkpoint import FineTuningJobCheckpoint as FineTuningJobCheckpoint
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py
new file mode 100644
index 00000000..adceb3b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/checkpoint_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["CheckpointListParams"]
+
+
+class CheckpointListParams(TypedDict, total=False):
+    after: str
+    """Identifier for the last checkpoint ID from the previous pagination request."""
+
+    limit: int
+    """Number of checkpoints to retrieve."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py
new file mode 100644
index 00000000..bd07317a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["FineTuningJobCheckpoint", "Metrics"]
+
+
+class Metrics(BaseModel):
+    full_valid_loss: Optional[float] = None
+
+    full_valid_mean_token_accuracy: Optional[float] = None
+
+    step: Optional[float] = None
+
+    train_loss: Optional[float] = None
+
+    train_mean_token_accuracy: Optional[float] = None
+
+    valid_loss: Optional[float] = None
+
+    valid_mean_token_accuracy: Optional[float] = None
+
+
+class FineTuningJobCheckpoint(BaseModel):
+    id: str
+    """The checkpoint identifier, which can be referenced in the API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the checkpoint was created."""
+
+    fine_tuned_model_checkpoint: str
+    """The name of the fine-tuned checkpoint model that is created."""
+
+    fine_tuning_job_id: str
+    """The name of the fine-tuning job that this checkpoint was created from."""
+
+    metrics: Metrics
+    """Metrics at the step number during the fine-tuning job."""
+
+    object: Literal["fine_tuning.job.checkpoint"]
+    """The object type, which is always "fine_tuning.job.checkpoint"."""
+
+    step_number: int
+    """The step number that the checkpoint was created at."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image.py b/.venv/lib/python3.12/site-packages/openai/types/image.py
new file mode 100644
index 00000000..f48aa2c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["Image"]
+
+
+class Image(BaseModel):
+    b64_json: Optional[str] = None
+    """
+    The base64-encoded JSON of the generated image, if `response_format` is
+    `b64_json`.
+    """
+
+    revised_prompt: Optional[str] = None
+    """
+    The prompt that was used to generate the image, if there was any revision to the
+    prompt.
+    """
+
+    url: Optional[str] = None
+    """The URL of the generated image, if `response_format` is `url` (default)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py b/.venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py
new file mode 100644
index 00000000..d20f6729
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_create_variation_params.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import FileTypes
+from .image_model import ImageModel
+
+__all__ = ["ImageCreateVariationParams"]
+
+
+class ImageCreateVariationParams(TypedDict, total=False):
+    image: Required[FileTypes]
+    """The image to use as the basis for the variation(s).
+
+    Must be a valid PNG file, less than 4MB, and square.
+    """
+
+    model: Union[str, ImageModel, None]
+    """The model to use for image generation.
+
+    Only `dall-e-2` is supported at this time.
+    """
+
+    n: Optional[int]
+    """The number of images to generate.
+
+    Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
+    """
+
+    response_format: Optional[Literal["url", "b64_json"]]
+    """The format in which the generated images are returned.
+
+    Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+    image has been generated.
+    """
+
+    size: Optional[Literal["256x256", "512x512", "1024x1024"]]
+    """The size of the generated images.
+
+    Must be one of `256x256`, `512x512`, or `1024x1024`.
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_edit_params.py b/.venv/lib/python3.12/site-packages/openai/types/image_edit_params.py
new file mode 100644
index 00000000..1cb10611
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_edit_params.py
@@ -0,0 +1,62 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import FileTypes
+from .image_model import ImageModel
+
+__all__ = ["ImageEditParams"]
+
+
+class ImageEditParams(TypedDict, total=False):
+    image: Required[FileTypes]
+    """The image to edit.
+
+    Must be a valid PNG file, less than 4MB, and square. If mask is not provided,
+    image must have transparency, which will be used as the mask.
+    """
+
+    prompt: Required[str]
+    """A text description of the desired image(s).
+
+    The maximum length is 1000 characters.
+    """
+
+    mask: FileTypes
+    """An additional image whose fully transparent areas (e.g.
+
+    where alpha is zero) indicate where `image` should be edited. Must be a valid
+    PNG file, less than 4MB, and have the same dimensions as `image`.
+    """
+
+    model: Union[str, ImageModel, None]
+    """The model to use for image generation.
+
+    Only `dall-e-2` is supported at this time.
+    """
+
+    n: Optional[int]
+    """The number of images to generate. Must be between 1 and 10."""
+
+    response_format: Optional[Literal["url", "b64_json"]]
+    """The format in which the generated images are returned.
+
+    Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+    image has been generated.
+    """
+
+    size: Optional[Literal["256x256", "512x512", "1024x1024"]]
+    """The size of the generated images.
+
+    Must be one of `256x256`, `512x512`, or `1024x1024`.
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_generate_params.py b/.venv/lib/python3.12/site-packages/openai/types/image_generate_params.py
new file mode 100644
index 00000000..c88c45f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_generate_params.py
@@ -0,0 +1,65 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .image_model import ImageModel
+
+__all__ = ["ImageGenerateParams"]
+
+
+class ImageGenerateParams(TypedDict, total=False):
+    prompt: Required[str]
+    """A text description of the desired image(s).
+
+    The maximum length is 1000 characters for `dall-e-2` and 4000 characters for
+    `dall-e-3`.
+    """
+
+    model: Union[str, ImageModel, None]
+    """The model to use for image generation."""
+
+    n: Optional[int]
+    """The number of images to generate.
+
+    Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
+    """
+
+    quality: Literal["standard", "hd"]
+    """The quality of the image that will be generated.
+
+    `hd` creates images with finer details and greater consistency across the image.
+    This param is only supported for `dall-e-3`.
+    """
+
+    response_format: Optional[Literal["url", "b64_json"]]
+    """The format in which the generated images are returned.
+
+    Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+    image has been generated.
+    """
+
+    size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
+    """The size of the generated images.
+
+    Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one
+    of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
+    """
+
+    style: Optional[Literal["vivid", "natural"]]
+    """The style of the generated images.
+
+    Must be one of `vivid` or `natural`. Vivid causes the model to lean towards
+    generating hyper-real and dramatic images. Natural causes the model to produce
+    more natural, less hyper-real looking images. This param is only supported for
+    `dall-e-3`.
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/image_model.py b/.venv/lib/python3.12/site-packages/openai/types/image_model.py
new file mode 100644
index 00000000..1672369b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/image_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ImageModel"]
+
+ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/images_response.py b/.venv/lib/python3.12/site-packages/openai/types/images_response.py
new file mode 100644
index 00000000..7cee8131
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/images_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .image import Image
+from .._models import BaseModel
+
+__all__ = ["ImagesResponse"]
+
+
+class ImagesResponse(BaseModel):
+    created: int
+
+    data: List[Image]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/model.py b/.venv/lib/python3.12/site-packages/openai/types/model.py
new file mode 100644
index 00000000..2631ee8d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/model.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Model"]
+
+
+class Model(BaseModel):
+    id: str
+    """The model identifier, which can be referenced in the API endpoints."""
+
+    created: int
+    """The Unix timestamp (in seconds) when the model was created."""
+
+    object: Literal["model"]
+    """The object type, which is always "model"."""
+
+    owned_by: str
+    """The organization that owns the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/model_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/model_deleted.py
new file mode 100644
index 00000000..7f81e1b3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/model_deleted.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["ModelDeleted"]
+
+
+class ModelDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation.py b/.venv/lib/python3.12/site-packages/openai/types/moderation.py
new file mode 100644
index 00000000..608f5622
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation.py
@@ -0,0 +1,186 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["Moderation", "Categories", "CategoryAppliedInputTypes", "CategoryScores"]
+
+
+class Categories(BaseModel):
+    harassment: bool
+    """
+    Content that expresses, incites, or promotes harassing language towards any
+    target.
+    """
+
+    harassment_threatening: bool = FieldInfo(alias="harassment/threatening")
+    """
+    Harassment content that also includes violence or serious harm towards any
+    target.
+    """
+
+    hate: bool
+    """
+    Content that expresses, incites, or promotes hate based on race, gender,
+    ethnicity, religion, nationality, sexual orientation, disability status, or
+    caste. Hateful content aimed at non-protected groups (e.g., chess players) is
+    harassment.
+    """
+
+    hate_threatening: bool = FieldInfo(alias="hate/threatening")
+    """
+    Hateful content that also includes violence or serious harm towards the targeted
+    group based on race, gender, ethnicity, religion, nationality, sexual
+    orientation, disability status, or caste.
+    """
+
+    illicit: Optional[bool] = None
+    """
+    Content that includes instructions or advice that facilitate the planning or
+    execution of wrongdoing, or that gives advice or instruction on how to commit
+    illicit acts. For example, "how to shoplift" would fit this category.
+    """
+
+    illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None)
+    """
+    Content that includes instructions or advice that facilitate the planning or
+    execution of wrongdoing that also includes violence, or that gives advice or
+    instruction on the procurement of any weapon.
+    """
+
+    self_harm: bool = FieldInfo(alias="self-harm")
+    """
+    Content that promotes, encourages, or depicts acts of self-harm, such as
+    suicide, cutting, and eating disorders.
+    """
+
+    self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions")
+    """
+    Content that encourages performing acts of self-harm, such as suicide, cutting,
+    and eating disorders, or that gives instructions or advice on how to commit such
+    acts.
+    """
+
+    self_harm_intent: bool = FieldInfo(alias="self-harm/intent")
+    """
+    Content where the speaker expresses that they are engaging or intend to engage
+    in acts of self-harm, such as suicide, cutting, and eating disorders.
+    """
+
+    sexual: bool
+    """
+    Content meant to arouse sexual excitement, such as the description of sexual
+    activity, or that promotes sexual services (excluding sex education and
+    wellness).
+    """
+
+    sexual_minors: bool = FieldInfo(alias="sexual/minors")
+    """Sexual content that includes an individual who is under 18 years old."""
+
+    violence: bool
+    """Content that depicts death, violence, or physical injury."""
+
+    violence_graphic: bool = FieldInfo(alias="violence/graphic")
+    """Content that depicts death, violence, or physical injury in graphic detail."""
+
+
+class CategoryAppliedInputTypes(BaseModel):
+    harassment: List[Literal["text"]]
+    """The applied input type(s) for the category 'harassment'."""
+
+    harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening")
+    """The applied input type(s) for the category 'harassment/threatening'."""
+
+    hate: List[Literal["text"]]
+    """The applied input type(s) for the category 'hate'."""
+
+    hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening")
+    """The applied input type(s) for the category 'hate/threatening'."""
+
+    illicit: List[Literal["text"]]
+    """The applied input type(s) for the category 'illicit'."""
+
+    illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent")
+    """The applied input type(s) for the category 'illicit/violent'."""
+
+    self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm")
+    """The applied input type(s) for the category 'self-harm'."""
+
+    self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions")
+    """The applied input type(s) for the category 'self-harm/instructions'."""
+
+    self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent")
+    """The applied input type(s) for the category 'self-harm/intent'."""
+
+    sexual: List[Literal["text", "image"]]
+    """The applied input type(s) for the category 'sexual'."""
+
+    sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors")
+    """The applied input type(s) for the category 'sexual/minors'."""
+
+    violence: List[Literal["text", "image"]]
+    """The applied input type(s) for the category 'violence'."""
+
+    violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic")
+    """The applied input type(s) for the category 'violence/graphic'."""
+
+
+class CategoryScores(BaseModel):
+    harassment: float
+    """The score for the category 'harassment'."""
+
+    harassment_threatening: float = FieldInfo(alias="harassment/threatening")
+    """The score for the category 'harassment/threatening'."""
+
+    hate: float
+    """The score for the category 'hate'."""
+
+    hate_threatening: float = FieldInfo(alias="hate/threatening")
+    """The score for the category 'hate/threatening'."""
+
+    illicit: float
+    """The score for the category 'illicit'."""
+
+    illicit_violent: float = FieldInfo(alias="illicit/violent")
+    """The score for the category 'illicit/violent'."""
+
+    self_harm: float = FieldInfo(alias="self-harm")
+    """The score for the category 'self-harm'."""
+
+    self_harm_instructions: float = FieldInfo(alias="self-harm/instructions")
+    """The score for the category 'self-harm/instructions'."""
+
+    self_harm_intent: float = FieldInfo(alias="self-harm/intent")
+    """The score for the category 'self-harm/intent'."""
+
+    sexual: float
+    """The score for the category 'sexual'."""
+
+    sexual_minors: float = FieldInfo(alias="sexual/minors")
+    """The score for the category 'sexual/minors'."""
+
+    violence: float
+    """The score for the category 'violence'."""
+
+    violence_graphic: float = FieldInfo(alias="violence/graphic")
+    """The score for the category 'violence/graphic'."""
+
+
+class Moderation(BaseModel):
+    categories: Categories
+    """A list of the categories, and whether they are flagged or not."""
+
+    category_applied_input_types: CategoryAppliedInputTypes
+    """
+    A list of the categories along with the input type(s) that the score applies to.
+    """
+
+    category_scores: CategoryScores
+    """A list of the categories along with their scores as predicted by model."""
+
+    flagged: bool
+    """Whether any of the below categories are flagged."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py
new file mode 100644
index 00000000..3ea2f3cd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_params.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Required, TypedDict
+
+from .moderation_model import ModerationModel
+from .moderation_multi_modal_input_param import ModerationMultiModalInputParam
+
+__all__ = ["ModerationCreateParams"]
+
+
+class ModerationCreateParams(TypedDict, total=False):
+    input: Required[Union[str, List[str], Iterable[ModerationMultiModalInputParam]]]
+    """Input (or inputs) to classify.
+
+    Can be a single string, an array of strings, or an array of multi-modal input
+    objects similar to other models.
+    """
+
+    model: Union[str, ModerationModel]
+    """The content moderation model you would like to use.
+
+    Learn more in
+    [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
+    learn about available models
+    [here](https://platform.openai.com/docs/models#moderation).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py
new file mode 100644
index 00000000..79684f8a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_create_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .moderation import Moderation
+
+__all__ = ["ModerationCreateResponse"]
+
+
+class ModerationCreateResponse(BaseModel):
+    id: str
+    """The unique identifier for the moderation request."""
+
+    model: str
+    """The model used to generate the moderation results."""
+
+    results: List[Moderation]
+    """A list of moderation objects."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py
new file mode 100644
index 00000000..9a69a6a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_image_url_input_param.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ModerationImageURLInputParam", "ImageURL"]
+
+
+class ImageURL(TypedDict, total=False):
+    url: Required[str]
+    """Either a URL of the image or the base64 encoded image data."""
+
+
+class ModerationImageURLInputParam(TypedDict, total=False):
+    image_url: Required[ImageURL]
+    """Contains either an image URL or a data URL for a base64 encoded image."""
+
+    type: Required[Literal["image_url"]]
+    """Always `image_url`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_model.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_model.py
new file mode 100644
index 00000000..64954c45
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_model.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ModerationModel"]
+
+ModerationModel: TypeAlias = Literal[
+    "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable"
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py
new file mode 100644
index 00000000..4314e7b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_multi_modal_input_param.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .moderation_text_input_param import ModerationTextInputParam
+from .moderation_image_url_input_param import ModerationImageURLInputParam
+
+__all__ = ["ModerationMultiModalInputParam"]
+
+ModerationMultiModalInputParam: TypeAlias = Union[ModerationImageURLInputParam, ModerationTextInputParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py
new file mode 100644
index 00000000..e5da5333
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/moderation_text_input_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ModerationTextInputParam"]
+
+
+class ModerationTextInputParam(TypedDict, total=False):
+    text: Required[str]
+    """A string of text to classify."""
+
+    type: Required[Literal["text"]]
+    """Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py b/.venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py
new file mode 100644
index 00000000..e4cd61a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/other_file_chunking_strategy_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["OtherFileChunkingStrategyObject"]
+
+
+class OtherFileChunkingStrategyObject(BaseModel):
+    type: Literal["other"]
+    """Always `other`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/responses/__init__.py
new file mode 100644
index 00000000..4f07a3d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/__init__.py
@@ -0,0 +1,155 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .tool import Tool as Tool
+from .response import Response as Response
+from .tool_param import ToolParam as ToolParam
+from .computer_tool import ComputerTool as ComputerTool
+from .function_tool import FunctionTool as FunctionTool
+from .response_item import ResponseItem as ResponseItem
+from .response_error import ResponseError as ResponseError
+from .response_usage import ResponseUsage as ResponseUsage
+from .parsed_response import (
+    ParsedContent as ParsedContent,
+    ParsedResponse as ParsedResponse,
+    ParsedResponseOutputItem as ParsedResponseOutputItem,
+    ParsedResponseOutputText as ParsedResponseOutputText,
+    ParsedResponseOutputMessage as ParsedResponseOutputMessage,
+    ParsedResponseFunctionToolCall as ParsedResponseFunctionToolCall,
+)
+from .response_status import ResponseStatus as ResponseStatus
+from .web_search_tool import WebSearchTool as WebSearchTool
+from .file_search_tool import FileSearchTool as FileSearchTool
+from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes
+from .response_item_list import ResponseItemList as ResponseItemList
+from .computer_tool_param import ComputerToolParam as ComputerToolParam
+from .function_tool_param import FunctionToolParam as FunctionToolParam
+from .response_includable import ResponseIncludable as ResponseIncludable
+from .response_input_file import ResponseInputFile as ResponseInputFile
+from .response_input_text import ResponseInputText as ResponseInputText
+from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions
+from .response_error_event import ResponseErrorEvent as ResponseErrorEvent
+from .response_input_image import ResponseInputImage as ResponseInputImage
+from .response_input_param import ResponseInputParam as ResponseInputParam
+from .response_output_item import ResponseOutputItem as ResponseOutputItem
+from .response_output_text import ResponseOutputText as ResponseOutputText
+from .response_text_config import ResponseTextConfig as ResponseTextConfig
+from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction
+from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent
+from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent
+from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam
+from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
+from .input_item_list_params import InputItemListParams as InputItemListParams
+from .response_create_params import ResponseCreateParams as ResponseCreateParams
+from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent
+from .response_input_content import ResponseInputContent as ResponseInputContent
+from .response_output_message import ResponseOutputMessage as ResponseOutputMessage
+from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal
+from .response_reasoning_item import ResponseReasoningItem as ResponseReasoningItem
+from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam
+from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam
+from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent
+from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams
+from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent
+from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent
+from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent
+from .response_input_file_param import ResponseInputFileParam as ResponseInputFileParam
+from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam
+from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam
+from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
+from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
+from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
+from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam
+from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
+from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
+from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
+from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall
+from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem
+from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent
+from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch
+from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam
+from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent
+from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam
+from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam
+from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam
+from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
+from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
+from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem
+from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
+from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam
+from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
+from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall
+from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam
+from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent
+from .response_audio_transcript_delta_event import (
+    ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
+)
+from .response_computer_tool_call_output_item import (
+    ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem,
+)
+from .response_format_text_json_schema_config import (
+    ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig,
+)
+from .response_function_tool_call_output_item import (
+    ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem,
+)
+from .response_web_search_call_completed_event import (
+    ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent,
+)
+from .response_web_search_call_searching_event import (
+    ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent,
+)
+from .response_file_search_call_completed_event import (
+    ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent,
+)
+from .response_file_search_call_searching_event import (
+    ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent,
+)
+from .response_input_message_content_list_param import (
+    ResponseInputMessageContentListParam as ResponseInputMessageContentListParam,
+)
+from .response_web_search_call_in_progress_event import (
+    ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,
+)
+from .response_file_search_call_in_progress_event import (
+    ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent,
+)
+from .response_function_call_arguments_done_event import (
+    ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
+)
+from .response_function_call_arguments_delta_event import (
+    ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
+)
+from .response_computer_tool_call_output_screenshot import (
+    ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot,
+)
+from .response_format_text_json_schema_config_param import (
+    ResponseFormatTextJSONSchemaConfigParam as ResponseFormatTextJSONSchemaConfigParam,
+)
+from .response_code_interpreter_call_code_done_event import (
+    ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent,
+)
+from .response_code_interpreter_call_completed_event import (
+    ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent,
+)
+from .response_code_interpreter_call_code_delta_event import (
+    ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent,
+)
+from .response_code_interpreter_call_in_progress_event import (
+    ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent,
+)
+from .response_code_interpreter_call_interpreting_event import (
+    ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent,
+)
+from .response_computer_tool_call_output_screenshot_param import (
+    ResponseComputerToolCallOutputScreenshotParam as ResponseComputerToolCallOutputScreenshotParam,
+)
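This `__init__` module only re-exports the response types so they can be imported from a single path. A trivial sketch (the chosen names are arbitrary):

```python
# Sketch: the re-exports above make these imports available from one module.
from openai.types.responses import Response, ResponseOutputText, FunctionToolParam

print(Response.__name__, ResponseOutputText.__name__, FunctionToolParam.__name__)
```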
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py
new file mode 100644
index 00000000..dffb7af7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComputerTool"]
+
+
+class ComputerTool(BaseModel):
+    display_height: float
+    """The height of the computer display."""
+
+    display_width: float
+    """The width of the computer display."""
+
+    environment: Literal["mac", "windows", "ubuntu", "browser"]
+    """The type of computer environment to control."""
+
+    type: Literal["computer_use_preview"]
+    """The type of the computer use tool. Always `computer_use_preview`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py
new file mode 100644
index 00000000..6b1072ff
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/computer_tool_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ComputerToolParam"]
+
+
+class ComputerToolParam(TypedDict, total=False):
+    display_height: Required[float]
+    """The height of the computer display."""
+
+    display_width: Required[float]
+    """The width of the computer display."""
+
+    environment: Required[Literal["mac", "windows", "ubuntu", "browser"]]
+    """The type of computer environment to control."""
+
+    type: Required[Literal["computer_use_preview"]]
+    """The type of the computer use tool. Always `computer_use_preview`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py
new file mode 100644
index 00000000..ef2f1c5f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/easy_input_message_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+
+__all__ = ["EasyInputMessageParam"]
+
+
+class EasyInputMessageParam(TypedDict, total=False):
+    content: Required[Union[str, ResponseInputMessageContentListParam]]
+    """
+    Text, image, or audio input to the model, used to generate a response. Can also
+    contain previous assistant responses.
+    """
+
+    role: Required[Literal["user", "assistant", "system", "developer"]]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py
new file mode 100644
index 00000000..683fc533
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from ..shared.compound_filter import CompoundFilter
+from ..shared.comparison_filter import ComparisonFilter
+
+__all__ = ["FileSearchTool", "Filters", "RankingOptions"]
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(BaseModel):
+    ranker: Optional[Literal["auto", "default-2024-11-15"]] = None
+    """The ranker to use for the file search."""
+
+    score_threshold: Optional[float] = None
+    """
+    The score threshold for the file search, a number between 0 and 1. Numbers
+    closer to 1 will attempt to return only the most relevant results, but may
+    return fewer results.
+    """
+
+
+class FileSearchTool(BaseModel):
+    type: Literal["file_search"]
+    """The type of the file search tool. Always `file_search`."""
+
+    vector_store_ids: List[str]
+    """The IDs of the vector stores to search."""
+
+    filters: Optional[Filters] = None
+    """A filter to apply based on file attributes."""
+
+    max_num_results: Optional[int] = None
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: Optional[RankingOptions] = None
+    """Ranking options for search."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py
new file mode 100644
index 00000000..2d6af853
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/file_search_tool_param.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..shared_params.compound_filter import CompoundFilter
+from ..shared_params.comparison_filter import ComparisonFilter
+
+__all__ = ["FileSearchToolParam", "Filters", "RankingOptions"]
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(TypedDict, total=False):
+    ranker: Literal["auto", "default-2024-11-15"]
+    """The ranker to use for the file search."""
+
+    score_threshold: float
+    """
+    The score threshold for the file search, a number between 0 and 1. Numbers
+    closer to 1 will attempt to return only the most relevant results, but may
+    return fewer results.
+    """
+
+
+class FileSearchToolParam(TypedDict, total=False):
+    type: Required[Literal["file_search"]]
+    """The type of the file search tool. Always `file_search`."""
+
+    vector_store_ids: Required[List[str]]
+    """The IDs of the vector stores to search."""
+
+    filters: Filters
+    """A filter to apply based on file attributes."""
+
+    max_num_results: int
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: RankingOptions
+    """Ranking options for search."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py
new file mode 100644
index 00000000..236a2c7c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FunctionTool"]
+
+
+class FunctionTool(BaseModel):
+    name: str
+    """The name of the function to call."""
+
+    parameters: Dict[str, object]
+    """A JSON schema object describing the parameters of the function."""
+
+    strict: bool
+    """Whether to enforce strict parameter validation. Default `true`."""
+
+    type: Literal["function"]
+    """The type of the function tool. Always `function`."""
+
+    description: Optional[str] = None
+    """A description of the function.
+
+    Used by the model to determine whether or not to call the function.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py
new file mode 100644
index 00000000..774a22e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/function_tool_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FunctionToolParam"]
+
+
+class FunctionToolParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
+
+    parameters: Required[Dict[str, object]]
+    """A JSON schema object describing the parameters of the function."""
+
+    strict: Required[bool]
+    """Whether to enforce strict parameter validation. Default `true`."""
+
+    type: Required[Literal["function"]]
+    """The type of the function tool. Always `function`."""
+
+    description: Optional[str]
+    """A description of the function.
+
+    Used by the model to determine whether or not to call the function.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py
new file mode 100644
index 00000000..e0b71f1a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/input_item_list_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["InputItemListParams"]
+
+
+class InputItemListParams(TypedDict, total=False):
+    after: str
+    """An item ID to list items after, used in pagination."""
+
+    before: str
+    """An item ID to list items before, used in pagination."""
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """The order to return the input items in. Default is `asc`.
+
+    - `asc`: Return the input items in ascending order.
+    - `desc`: Return the input items in descending order.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py b/.venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py
new file mode 100644
index 00000000..1263dfd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/parsed_response.py
@@ -0,0 +1,77 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import TYPE_CHECKING, List, Union, Generic, TypeVar, Optional
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response import Response
+from ..._models import GenericModel
+from ..._utils._transform import PropertyInfo
+from .response_output_text import ResponseOutputText
+from .response_output_message import ResponseOutputMessage
+from .response_output_refusal import ResponseOutputRefusal
+from .response_reasoning_item import ResponseReasoningItem
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_function_tool_call import ResponseFunctionToolCall
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+
+__all__ = ["ParsedResponse", "ParsedResponseOutputMessage", "ParsedResponseOutputText"]
+
+ContentType = TypeVar("ContentType")
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedResponseOutputText(ResponseOutputText, GenericModel, Generic[ContentType]):
+    parsed: Optional[ContentType] = None
+
+
+ParsedContent: TypeAlias = Annotated[
+    Union[ParsedResponseOutputText[ContentType], ResponseOutputRefusal],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ParsedResponseOutputMessage(ResponseOutputMessage, GenericModel, Generic[ContentType]):
+    if TYPE_CHECKING:
+        content: List[ParsedContent[ContentType]]  # type: ignore[assignment]
+    else:
+        content: List[ParsedContent]
+
+
+class ParsedResponseFunctionToolCall(ResponseFunctionToolCall):
+    parsed_arguments: object = None
+
+
+ParsedResponseOutputItem: TypeAlias = Annotated[
+    Union[
+        ParsedResponseOutputMessage[ContentType],
+        ParsedResponseFunctionToolCall,
+        ResponseFileSearchToolCall,
+        ResponseFunctionWebSearch,
+        ResponseComputerToolCall,
+        ResponseReasoningItem,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ParsedResponse(Response, GenericModel, Generic[ContentType]):
+    if TYPE_CHECKING:
+        output: List[ParsedResponseOutputItem[ContentType]]  # type: ignore[assignment]
+    else:
+        output: List[ParsedResponseOutputItem]
+
+    @property
+    def output_parsed(self) -> Optional[ContentType]:
+        for output in self.output:
+            if output.type == "message":
+                for content in output.content:
+                    if content.type == "output_text" and content.parsed:
+                        return content.parsed
+
+        return None
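A hedged sketch of how a `ParsedResponse` is usually obtained: `client.responses.parse` accepts a Pydantic model via `text_format` and fills in `parsed`/`output_parsed`. The schema, model name, and prompt below are illustrative assumptions:

```python
# Sketch: structured output parsed into a user-defined Pydantic model.
from pydantic import BaseModel

from openai import OpenAI


class CityInfo(BaseModel):
    name: str
    population: int


client = OpenAI()

parsed = client.responses.parse(
    model="gpt-4o",
    input="Give the name and population of the capital of France.",
    text_format=CityInfo,
)
print(parsed.output_parsed)  # e.g. CityInfo(name='Paris', population=...)
```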
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response.py
new file mode 100644
index 00000000..1bedf808
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response.py
@@ -0,0 +1,204 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from .tool import Tool
+from ..._models import BaseModel
+from .response_error import ResponseError
+from .response_usage import ResponseUsage
+from .response_status import ResponseStatus
+from ..shared.metadata import Metadata
+from ..shared.reasoning import Reasoning
+from .tool_choice_types import ToolChoiceTypes
+from .tool_choice_options import ToolChoiceOptions
+from .response_output_item import ResponseOutputItem
+from .response_text_config import ResponseTextConfig
+from .tool_choice_function import ToolChoiceFunction
+from ..shared.responses_model import ResponsesModel
+
+__all__ = ["Response", "IncompleteDetails", "ToolChoice"]
+
+
+class IncompleteDetails(BaseModel):
+    reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
+    """The reason why the response is incomplete."""
+
+
+ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction]
+
+
+class Response(BaseModel):
+    id: str
+    """Unique identifier for this Response."""
+
+    created_at: float
+    """Unix timestamp (in seconds) of when this Response was created."""
+
+    error: Optional[ResponseError] = None
+    """An error object returned when the model fails to generate a Response."""
+
+    incomplete_details: Optional[IncompleteDetails] = None
+    """Details about why the response is incomplete."""
+
+    instructions: Optional[str] = None
+    """
+    Inserts a system (or developer) message as the first item in the model's
+    context.
+
+    When used along with `previous_response_id`, the instructions from a previous
+    response will not be carried over to the next response. This makes it simple
+    to swap out system (or developer) messages in new responses.
+    """
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: ResponsesModel
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
+    object: Literal["response"]
+    """The object type of this resource - always set to `response`."""
+
+    output: List[ResponseOutputItem]
+    """An array of content items generated by the model.
+
+    - The length and order of items in the `output` array are dependent on the
+      model's response.
+    - Rather than accessing the first item in the `output` array and assuming it's
+      an `assistant` message with the content generated by the model, you might
+      consider using the `output_text` property where supported in SDKs.
+    """
+
+    parallel_tool_calls: bool
+    """Whether to allow the model to run tool calls in parallel."""
+
+    temperature: Optional[float] = None
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. We generally recommend altering
+    this or `top_p` but not both.
+    """
+
+    tool_choice: ToolChoice
+    """
+    How the model should select which tool (or tools) to use when generating a
+    response. See the `tools` parameter to see how to specify which tools the model
+    can call.
+    """
+
+    tools: List[Tool]
+    """An array of tools the model may call while generating a response.
+
+    You can specify which tool to use by setting the `tool_choice` parameter.
+
+    The two categories of tools you can provide the model are:
+
+    - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+      capabilities, like
+      [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+      [file search](https://platform.openai.com/docs/guides/tools-file-search).
+      Learn more about
+      [built-in tools](https://platform.openai.com/docs/guides/tools).
+    - **Function calls (custom tools)**: Functions that are defined by you, enabling
+      the model to call your own code. Learn more about
+      [function calling](https://platform.openai.com/docs/guides/function-calling).
+    """
+
+    top_p: Optional[float] = None
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or `temperature` but not both.
+    """
+
+    max_output_tokens: Optional[int] = None
+    """
+    An upper bound for the number of tokens that can be generated for a response,
+    including visible output tokens and
+    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    previous_response_id: Optional[str] = None
+    """The unique ID of the previous response to the model.
+
+    Use this to create multi-turn conversations. Learn more about
+    [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+    """
+
+    reasoning: Optional[Reasoning] = None
+    """**o-series models only**
+
+    Configuration options for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    status: Optional[ResponseStatus] = None
+    """The status of the response generation.
+
+    One of `completed`, `failed`, `in_progress`, or `incomplete`.
+    """
+
+    text: Optional[ResponseTextConfig] = None
+    """Configuration options for a text response from the model.
+
+    Can be plain text or structured JSON data. Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+    """
+
+    truncation: Optional[Literal["auto", "disabled"]] = None
+    """The truncation strategy to use for the model response.
+
+    - `auto`: If the context of this response and previous ones exceeds the model's
+      context window size, the model will truncate the response to fit the context
+      window by dropping input items in the middle of the conversation.
+    - `disabled` (default): If a model response will exceed the context window size
+      for a model, the request will fail with a 400 error.
+    """
+
+    usage: Optional[ResponseUsage] = None
+    """
+    Represents token usage details including input tokens, output tokens, a
+    breakdown of output tokens, and the total tokens used.
+    """
+
+    user: Optional[str] = None
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
+
+    @property
+    def output_text(self) -> str:
+        """Convenience property that aggregates all `output_text` items from the `output`
+        list.
+
+        If no `output_text` content blocks exist, then an empty string is returned.
+        """
+        texts: List[str] = []
+        for output in self.output:
+            if output.type == "message":
+                for content in output.content:
+                    if content.type == "output_text":
+                        texts.append(content.text)
+
+        return "".join(texts)
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py
new file mode 100644
index 00000000..f3d77fac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_delta_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioDeltaEvent"]
+
+
+class ResponseAudioDeltaEvent(BaseModel):
+    delta: str
+    """A chunk of Base64 encoded response audio bytes."""
+
+    type: Literal["response.audio.delta"]
+    """The type of the event. Always `response.audio.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py
new file mode 100644
index 00000000..5654f8e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_done_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioDoneEvent"]
+
+
+class ResponseAudioDoneEvent(BaseModel):
+    type: Literal["response.audio.done"]
+    """The type of the event. Always `response.audio.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py
new file mode 100644
index 00000000..69b6660f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_delta_event.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDeltaEvent"]
+
+
+class ResponseAudioTranscriptDeltaEvent(BaseModel):
+    delta: str
+    """The partial transcript of the audio response."""
+
+    type: Literal["response.audio.transcript.delta"]
+    """The type of the event. Always `response.audio.transcript.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py
new file mode 100644
index 00000000..1a20319f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_audio_transcript_done_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseAudioTranscriptDoneEvent"]
+
+
+class ResponseAudioTranscriptDoneEvent(BaseModel):
+    type: Literal["response.audio.transcript.done"]
+    """The type of the event. Always `response.audio.transcript.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py
new file mode 100644
index 00000000..7527238d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterCallCodeDeltaEvent"]
+
+
+class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
+    delta: str
+    """The partial code snippet added by the code interpreter."""
+
+    output_index: int
+    """The index of the output item that the code interpreter call is in progress."""
+
+    type: Literal["response.code_interpreter_call.code.delta"]
+    """The type of the event. Always `response.code_interpreter_call.code.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py
new file mode 100644
index 00000000..f84d4cf3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterCallCodeDoneEvent"]
+
+
+class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
+    code: str
+    """The final code snippet output by the code interpreter."""
+
+    output_index: int
+    """The index of the output item that the code interpreter call is in progress."""
+
+    type: Literal["response.code_interpreter_call.code.done"]
+    """The type of the event. Always `response.code_interpreter_call.code.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py
new file mode 100644
index 00000000..b0cb73fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_completed_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallCompletedEvent"]
+
+
+class ResponseCodeInterpreterCallCompletedEvent(BaseModel):
+    code_interpreter_call: ResponseCodeInterpreterToolCall
+    """A tool call to run code."""
+
+    output_index: int
+    """The index of the output item that the code interpreter call is in progress."""
+
+    type: Literal["response.code_interpreter_call.completed"]
+    """The type of the event. Always `response.code_interpreter_call.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py
new file mode 100644
index 00000000..64b739f3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_in_progress_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallInProgressEvent"]
+
+
+class ResponseCodeInterpreterCallInProgressEvent(BaseModel):
+    code_interpreter_call: ResponseCodeInterpreterToolCall
+    """A tool call to run code."""
+
+    output_index: int
+    """The index of the output item that the code interpreter call is in progress."""
+
+    type: Literal["response.code_interpreter_call.in_progress"]
+    """The type of the event. Always `response.code_interpreter_call.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py
new file mode 100644
index 00000000..3100eac1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_call_interpreting_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = ["ResponseCodeInterpreterCallInterpretingEvent"]
+
+
+class ResponseCodeInterpreterCallInterpretingEvent(BaseModel):
+    code_interpreter_call: ResponseCodeInterpreterToolCall
+    """A tool call to run code."""
+
+    output_index: int
+    """The index of the output item that the code interpreter call is in progress."""
+
+    type: Literal["response.code_interpreter_call.interpreting"]
+    """The type of the event. Always `response.code_interpreter_call.interpreting`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py
new file mode 100644
index 00000000..d5a50570
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_code_interpreter_tool_call.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["ResponseCodeInterpreterToolCall", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"]
+
+
+class ResultLogs(BaseModel):
+    logs: str
+    """The logs of the code interpreter tool call."""
+
+    type: Literal["logs"]
+    """The type of the code interpreter text output. Always `logs`."""
+
+
+class ResultFilesFile(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    mime_type: str
+    """The MIME type of the file."""
+
+
+class ResultFiles(BaseModel):
+    files: List[ResultFilesFile]
+
+    type: Literal["files"]
+    """The type of the code interpreter file output. Always `files`."""
+
+
+Result: TypeAlias = Annotated[Union[ResultLogs, ResultFiles], PropertyInfo(discriminator="type")]
+
+
+class ResponseCodeInterpreterToolCall(BaseModel):
+    id: str
+    """The unique ID of the code interpreter tool call."""
+
+    code: str
+    """The code to run."""
+
+    results: List[Result]
+    """The results of the code interpreter tool call."""
+
+    status: Literal["in_progress", "interpreting", "completed"]
+    """The status of the code interpreter tool call."""
+
+    type: Literal["code_interpreter_call"]
+    """The type of the code interpreter tool call. Always `code_interpreter_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py
new file mode 100644
index 00000000..a944f248
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_completed_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseCompletedEvent"]
+
+
+class ResponseCompletedEvent(BaseModel):
+    response: Response
+    """Properties of the completed response."""
+
+    type: Literal["response.completed"]
+    """The type of the event. Always `response.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py
new file mode 100644
index 00000000..99483756
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call.py
@@ -0,0 +1,212 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+    "ResponseComputerToolCall",
+    "Action",
+    "ActionClick",
+    "ActionDoubleClick",
+    "ActionDrag",
+    "ActionDragPath",
+    "ActionKeypress",
+    "ActionMove",
+    "ActionScreenshot",
+    "ActionScroll",
+    "ActionType",
+    "ActionWait",
+    "PendingSafetyCheck",
+]
+
+
+class ActionClick(BaseModel):
+    button: Literal["left", "right", "wheel", "back", "forward"]
+    """Indicates which mouse button was pressed during the click.
+
+    One of `left`, `right`, `wheel`, `back`, or `forward`.
+    """
+
+    type: Literal["click"]
+    """Specifies the event type.
+
+    For a click action, this property is always set to `click`.
+    """
+
+    x: int
+    """The x-coordinate where the click occurred."""
+
+    y: int
+    """The y-coordinate where the click occurred."""
+
+
+class ActionDoubleClick(BaseModel):
+    type: Literal["double_click"]
+    """Specifies the event type.
+
+    For a double click action, this property is always set to `double_click`.
+    """
+
+    x: int
+    """The x-coordinate where the double click occurred."""
+
+    y: int
+    """The y-coordinate where the double click occurred."""
+
+
+class ActionDragPath(BaseModel):
+    x: int
+    """The x-coordinate."""
+
+    y: int
+    """The y-coordinate."""
+
+
+class ActionDrag(BaseModel):
+    path: List[ActionDragPath]
+    """An array of coordinates representing the path of the drag action.
+
+    Coordinates will appear as an array of objects, e.g.
+
+    ```
+    [
+      { x: 100, y: 200 },
+      { x: 200, y: 300 }
+    ]
+    ```
+    """
+
+    type: Literal["drag"]
+    """Specifies the event type.
+
+    For a drag action, this property is always set to `drag`.
+    """
+
+
+class ActionKeypress(BaseModel):
+    keys: List[str]
+    """The combination of keys the model is requesting to be pressed.
+
+    This is an array of strings, each representing a key.
+    """
+
+    type: Literal["keypress"]
+    """Specifies the event type.
+
+    For a keypress action, this property is always set to `keypress`.
+    """
+
+
+class ActionMove(BaseModel):
+    type: Literal["move"]
+    """Specifies the event type.
+
+    For a move action, this property is always set to `move`.
+    """
+
+    x: int
+    """The x-coordinate to move to."""
+
+    y: int
+    """The y-coordinate to move to."""
+
+
+class ActionScreenshot(BaseModel):
+    type: Literal["screenshot"]
+    """Specifies the event type.
+
+    For a screenshot action, this property is always set to `screenshot`.
+    """
+
+
+class ActionScroll(BaseModel):
+    scroll_x: int
+    """The horizontal scroll distance."""
+
+    scroll_y: int
+    """The vertical scroll distance."""
+
+    type: Literal["scroll"]
+    """Specifies the event type.
+
+    For a scroll action, this property is always set to `scroll`.
+    """
+
+    x: int
+    """The x-coordinate where the scroll occurred."""
+
+    y: int
+    """The y-coordinate where the scroll occurred."""
+
+
+class ActionType(BaseModel):
+    text: str
+    """The text to type."""
+
+    type: Literal["type"]
+    """Specifies the event type.
+
+    For a type action, this property is always set to `type`.
+    """
+
+
+class ActionWait(BaseModel):
+    type: Literal["wait"]
+    """Specifies the event type.
+
+    For a wait action, this property is always set to `wait`.
+    """
+
+
+Action: TypeAlias = Annotated[
+    Union[
+        ActionClick,
+        ActionDoubleClick,
+        ActionDrag,
+        ActionKeypress,
+        ActionMove,
+        ActionScreenshot,
+        ActionScroll,
+        ActionType,
+        ActionWait,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class PendingSafetyCheck(BaseModel):
+    id: str
+    """The ID of the pending safety check."""
+
+    code: str
+    """The type of the pending safety check."""
+
+    message: str
+    """Details about the pending safety check."""
+
+
+class ResponseComputerToolCall(BaseModel):
+    id: str
+    """The unique ID of the computer call."""
+
+    action: Action
+    """A click action."""
+
+    call_id: str
+    """An identifier used when responding to the tool call with output."""
+
+    pending_safety_checks: List[PendingSafetyCheck]
+    """The pending safety checks for the computer call."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Literal["computer_call"]
+    """The type of the computer call. Always `computer_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py
new file mode 100644
index 00000000..a2dd68f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_item.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
+
+__all__ = ["ResponseComputerToolCallOutputItem", "AcknowledgedSafetyCheck"]
+
+
+class AcknowledgedSafetyCheck(BaseModel):
+    id: str
+    """The ID of the pending safety check."""
+
+    code: str
+    """The type of the pending safety check."""
+
+    message: str
+    """Details about the pending safety check."""
+
+
+class ResponseComputerToolCallOutputItem(BaseModel):
+    id: str
+    """The unique ID of the computer call tool output."""
+
+    call_id: str
+    """The ID of the computer tool call that produced the output."""
+
+    output: ResponseComputerToolCallOutputScreenshot
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Literal["computer_call_output"]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    acknowledged_safety_checks: Optional[List[AcknowledgedSafetyCheck]] = None
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py
new file mode 100644
index 00000000..a500da85
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseComputerToolCallOutputScreenshot"]
+
+
+class ResponseComputerToolCallOutputScreenshot(BaseModel):
+    type: Literal["computer_screenshot"]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: Optional[str] = None
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: Optional[str] = None
+    """The URL of the screenshot image."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py
new file mode 100644
index 00000000..efc2028a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_output_screenshot_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseComputerToolCallOutputScreenshotParam"]
+
+
+class ResponseComputerToolCallOutputScreenshotParam(TypedDict, total=False):
+    type: Required[Literal["computer_screenshot"]]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: str
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: str
+    """The URL of the screenshot image."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py
new file mode 100644
index 00000000..d4ef56ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_computer_tool_call_param.py
@@ -0,0 +1,208 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "ResponseComputerToolCallParam",
+    "Action",
+    "ActionClick",
+    "ActionDoubleClick",
+    "ActionDrag",
+    "ActionDragPath",
+    "ActionKeypress",
+    "ActionMove",
+    "ActionScreenshot",
+    "ActionScroll",
+    "ActionType",
+    "ActionWait",
+    "PendingSafetyCheck",
+]
+
+
+class ActionClick(TypedDict, total=False):
+    button: Required[Literal["left", "right", "wheel", "back", "forward"]]
+    """Indicates which mouse button was pressed during the click.
+
+    One of `left`, `right`, `wheel`, `back`, or `forward`.
+    """
+
+    type: Required[Literal["click"]]
+    """Specifies the event type.
+
+    For a click action, this property is always set to `click`.
+    """
+
+    x: Required[int]
+    """The x-coordinate where the click occurred."""
+
+    y: Required[int]
+    """The y-coordinate where the click occurred."""
+
+
+class ActionDoubleClick(TypedDict, total=False):
+    type: Required[Literal["double_click"]]
+    """Specifies the event type.
+
+    For a double click action, this property is always set to `double_click`.
+    """
+
+    x: Required[int]
+    """The x-coordinate where the double click occurred."""
+
+    y: Required[int]
+    """The y-coordinate where the double click occurred."""
+
+
+class ActionDragPath(TypedDict, total=False):
+    x: Required[int]
+    """The x-coordinate."""
+
+    y: Required[int]
+    """The y-coordinate."""
+
+
+class ActionDrag(TypedDict, total=False):
+    path: Required[Iterable[ActionDragPath]]
+    """An array of coordinates representing the path of the drag action.
+
+    Coordinates will appear as an array of objects, e.g.
+
+    ```
+    [
+      { x: 100, y: 200 },
+      { x: 200, y: 300 }
+    ]
+    ```
+    """
+
+    type: Required[Literal["drag"]]
+    """Specifies the event type.
+
+    For a drag action, this property is always set to `drag`.
+    """
+
+
+class ActionKeypress(TypedDict, total=False):
+    keys: Required[List[str]]
+    """The combination of keys the model is requesting to be pressed.
+
+    This is an array of strings, each representing a key.
+    """
+
+    type: Required[Literal["keypress"]]
+    """Specifies the event type.
+
+    For a keypress action, this property is always set to `keypress`.
+    """
+
+
+class ActionMove(TypedDict, total=False):
+    type: Required[Literal["move"]]
+    """Specifies the event type.
+
+    For a move action, this property is always set to `move`.
+    """
+
+    x: Required[int]
+    """The x-coordinate to move to."""
+
+    y: Required[int]
+    """The y-coordinate to move to."""
+
+
+class ActionScreenshot(TypedDict, total=False):
+    type: Required[Literal["screenshot"]]
+    """Specifies the event type.
+
+    For a screenshot action, this property is always set to `screenshot`.
+    """
+
+
+class ActionScroll(TypedDict, total=False):
+    scroll_x: Required[int]
+    """The horizontal scroll distance."""
+
+    scroll_y: Required[int]
+    """The vertical scroll distance."""
+
+    type: Required[Literal["scroll"]]
+    """Specifies the event type.
+
+    For a scroll action, this property is always set to `scroll`.
+    """
+
+    x: Required[int]
+    """The x-coordinate where the scroll occurred."""
+
+    y: Required[int]
+    """The y-coordinate where the scroll occurred."""
+
+
+class ActionType(TypedDict, total=False):
+    text: Required[str]
+    """The text to type."""
+
+    type: Required[Literal["type"]]
+    """Specifies the event type.
+
+    For a type action, this property is always set to `type`.
+    """
+
+
+class ActionWait(TypedDict, total=False):
+    type: Required[Literal["wait"]]
+    """Specifies the event type.
+
+    For a wait action, this property is always set to `wait`.
+    """
+
+
+Action: TypeAlias = Union[
+    ActionClick,
+    ActionDoubleClick,
+    ActionDrag,
+    ActionKeypress,
+    ActionMove,
+    ActionScreenshot,
+    ActionScroll,
+    ActionType,
+    ActionWait,
+]
+
+
+class PendingSafetyCheck(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the pending safety check."""
+
+    code: Required[str]
+    """The type of the pending safety check."""
+
+    message: Required[str]
+    """Details about the pending safety check."""
+
+
+class ResponseComputerToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the computer call."""
+
+    action: Required[Action]
+    """A click action."""
+
+    call_id: Required[str]
+    """An identifier used when responding to the tool call with output."""
+
+    pending_safety_checks: Required[Iterable[PendingSafetyCheck]]
+    """The pending safety checks for the computer call."""
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Required[Literal["computer_call"]]
+    """The type of the computer call. Always `computer_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py
new file mode 100644
index 00000000..93f5ec4b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_added_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseContentPartAddedEvent", "Part"]
+
+Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseContentPartAddedEvent(BaseModel):
+    content_index: int
+    """The index of the content part that was added."""
+
+    item_id: str
+    """The ID of the output item that the content part was added to."""
+
+    output_index: int
+    """The index of the output item that the content part was added to."""
+
+    part: Part
+    """The content part that was added."""
+
+    type: Literal["response.content_part.added"]
+    """The type of the event. Always `response.content_part.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py
new file mode 100644
index 00000000..4ec07398
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_content_part_done_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseContentPartDoneEvent", "Part"]
+
+Part: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseContentPartDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part that is done."""
+
+    item_id: str
+    """The ID of the output item that the content part was added to."""
+
+    output_index: int
+    """The index of the output item that the content part was added to."""
+
+    part: Part
+    """The content part that is done."""
+
+    type: Literal["response.content_part.done"]
+    """The type of the event. Always `response.content_part.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py
new file mode 100644
index 00000000..651050c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_create_params.py
@@ -0,0 +1,204 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .tool_param import ToolParam
+from .response_includable import ResponseIncludable
+from .tool_choice_options import ToolChoiceOptions
+from .response_input_param import ResponseInputParam
+from ..shared_params.metadata import Metadata
+from .tool_choice_types_param import ToolChoiceTypesParam
+from ..shared_params.reasoning import Reasoning
+from .response_text_config_param import ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam
+from ..shared_params.responses_model import ResponsesModel
+
+__all__ = [
+    "ResponseCreateParamsBase",
+    "ToolChoice",
+    "ResponseCreateParamsNonStreaming",
+    "ResponseCreateParamsStreaming",
+]
+
+
+class ResponseCreateParamsBase(TypedDict, total=False):
+    input: Required[Union[str, ResponseInputParam]]
+    """Text, image, or file inputs to the model, used to generate a response.
+
+    Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Image inputs](https://platform.openai.com/docs/guides/images)
+    - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+    - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+    - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+    """
+
+    model: Required[ResponsesModel]
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
+    include: Optional[List[ResponseIncludable]]
+    """Specify additional output data to include in the model response.
+
+    Currently supported values are:
+
+    - `file_search_call.results`: Include the search results of the file search tool
+      call.
+    - `message.input_image.image_url`: Include image urls from the input message.
+    - `computer_call_output.output.image_url`: Include image urls from the computer
+      call output.
+    """
+
+    instructions: Optional[str]
+    """
+    Inserts a system (or developer) message as the first item in the model's
+    context.
+
+    When used along with `previous_response_id`, the instructions from a previous
+    response will not be carried over to the next response. This makes it simple
+    to swap out system (or developer) messages in new responses.
+    """
+
+    max_output_tokens: Optional[int]
+    """
+    An upper bound for the number of tokens that can be generated for a response,
+    including visible output tokens and
+    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    parallel_tool_calls: Optional[bool]
+    """Whether to allow the model to run tool calls in parallel."""
+
+    previous_response_id: Optional[str]
+    """The unique ID of the previous response to the model.
+
+    Use this to create multi-turn conversations. Learn more about
+    [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+    """
+
+    reasoning: Optional[Reasoning]
+    """**o-series models only**
+
+    Configuration options for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    store: Optional[bool]
+    """Whether to store the generated model response for later retrieval via API."""
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. We generally recommend altering
+    this or `top_p` but not both.
+    """
+
+    text: ResponseTextConfigParam
+    """Configuration options for a text response from the model.
+
+    Can be plain text or structured JSON data. Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+    """
+
+    tool_choice: ToolChoice
+    """
+    How the model should select which tool (or tools) to use when generating a
+    response. See the `tools` parameter to see how to specify which tools the model
+    can call.
+    """
+
+    tools: Iterable[ToolParam]
+    """An array of tools the model may call while generating a response.
+
+    You can specify which tool to use by setting the `tool_choice` parameter.
+
+    The two categories of tools you can provide the model are:
+
+    - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+      capabilities, like
+      [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+      [file search](https://platform.openai.com/docs/guides/tools-file-search).
+      Learn more about
+      [built-in tools](https://platform.openai.com/docs/guides/tools).
+    - **Function calls (custom tools)**: Functions that are defined by you, enabling
+      the model to call your own code. Learn more about
+      [function calling](https://platform.openai.com/docs/guides/function-calling).
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or `temperature` but not both.
+    """
+
+    truncation: Optional[Literal["auto", "disabled"]]
+    """The truncation strategy to use for the model response.
+
+    - `auto`: If the context of this response and previous ones exceeds the model's
+      context window size, the model will truncate the response to fit the context
+      window by dropping input items in the middle of the conversation.
+    - `disabled` (default): If a model response will exceed the context window size
+      for a model, the request will fail with a 400 error.
+    """
+
+    user: str
+    """
+    A unique identifier representing your end-user, which can help OpenAI to monitor
+    and detect abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    """
+
+
+ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypesParam, ToolChoiceFunctionParam]
+
+
+class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+    for more information.
+    """
+
+
+class ResponseCreateParamsStreaming(ResponseCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+    for more information.
+    """
+
+
+ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming]
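
These create-params `TypedDict`s mirror the keyword arguments accepted when creating a response; the streaming variant differs only in requiring `stream=True`. A minimal sketch of the dict shape (the model name is just an example):

```
from openai.types.responses.response_create_params import (
    ResponseCreateParamsStreaming,
)

params: ResponseCreateParamsStreaming = {
    "model": "gpt-4o",  # example model ID
    "input": "Write a haiku about diff reviews.",
    "stream": True,
    "temperature": 0.2,
}
```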
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py
new file mode 100644
index 00000000..7a524cec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_created_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseCreatedEvent"]
+
+
+class ResponseCreatedEvent(BaseModel):
+    response: Response
+    """The response that was created."""
+
+    type: Literal["response.created"]
+    """The type of the event. Always `response.created`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_error.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error.py
new file mode 100644
index 00000000..90f1fcf5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseError"]
+
+
+class ResponseError(BaseModel):
+    code: Literal[
+        "server_error",
+        "rate_limit_exceeded",
+        "invalid_prompt",
+        "vector_store_timeout",
+        "invalid_image",
+        "invalid_image_format",
+        "invalid_base64_image",
+        "invalid_image_url",
+        "image_too_large",
+        "image_too_small",
+        "image_parse_error",
+        "image_content_policy_violation",
+        "invalid_image_mode",
+        "image_file_too_large",
+        "unsupported_image_media_type",
+        "empty_image_file",
+        "failed_to_download_image",
+        "image_file_not_found",
+    ]
+    """The error code for the response."""
+
+    message: str
+    """A human-readable description of the error."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py
new file mode 100644
index 00000000..1b7e605d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_error_event.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseErrorEvent"]
+
+
+class ResponseErrorEvent(BaseModel):
+    code: Optional[str] = None
+    """The error code."""
+
+    message: str
+    """The error message."""
+
+    param: Optional[str] = None
+    """The error parameter."""
+
+    type: Literal["error"]
+    """The type of the event. Always `error`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py
new file mode 100644
index 00000000..3e8f75d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_failed_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseFailedEvent"]
+
+
+class ResponseFailedEvent(BaseModel):
+    response: Response
+    """The response that failed."""
+
+    type: Literal["response.failed"]
+    """The type of the event. Always `response.failed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py
new file mode 100644
index 00000000..4b860833
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_completed_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallCompletedEvent"]
+
+
+class ResponseFileSearchCallCompletedEvent(BaseModel):
+    item_id: str
+    """The ID of the output item that the file search call is initiated."""
+
+    output_index: int
+    """The index of the output item that the file search call is initiated."""
+
+    type: Literal["response.file_search_call.completed"]
+    """The type of the event. Always `response.file_search_call.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py
new file mode 100644
index 00000000..eb42e3da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_in_progress_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallInProgressEvent"]
+
+
+class ResponseFileSearchCallInProgressEvent(BaseModel):
+    item_id: str
+    """The ID of the output item that the file search call is initiated."""
+
+    output_index: int
+    """The index of the output item that the file search call is initiated."""
+
+    type: Literal["response.file_search_call.in_progress"]
+    """The type of the event. Always `response.file_search_call.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py
new file mode 100644
index 00000000..3cd8905d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_call_searching_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchCallSearchingEvent"]
+
+
+class ResponseFileSearchCallSearchingEvent(BaseModel):
+    item_id: str
+    """The ID of the output item that the file search call is initiated."""
+
+    output_index: int
+    """The index of the output item that the file search call is searching."""
+
+    type: Literal["response.file_search_call.searching"]
+    """The type of the event. Always `response.file_search_call.searching`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py
new file mode 100644
index 00000000..ef1c6a56
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFileSearchToolCall", "Result"]
+
+
+class Result(BaseModel):
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: Optional[str] = None
+    """The unique ID of the file."""
+
+    filename: Optional[str] = None
+    """The name of the file."""
+
+    score: Optional[float] = None
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: Optional[str] = None
+    """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCall(BaseModel):
+    id: str
+    """The unique ID of the file search tool call."""
+
+    queries: List[str]
+    """The queries used to search for files."""
+
+    status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Literal["file_search_call"]
+    """The type of the file search tool call. Always `file_search_call`."""
+
+    results: Optional[List[Result]] = None
+    """The results of the file search tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py
new file mode 100644
index 00000000..9a4177cf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_file_search_tool_call_param.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFileSearchToolCallParam", "Result"]
+
+
+class Result(TypedDict, total=False):
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: str
+    """The unique ID of the file."""
+
+    filename: str
+    """The name of the file."""
+
+    score: float
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: str
+    """The text that was retrieved from the file."""
+
+
+class ResponseFileSearchToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the file search tool call."""
+
+    queries: Required[List[str]]
+    """The queries used to search for files."""
+
+    status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Required[Literal["file_search_call"]]
+    """The type of the file search tool call. Always `file_search_call`."""
+
+    results: Optional[Iterable[Result]]
+    """The results of the file search tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py
new file mode 100644
index 00000000..a4896bf9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..shared.response_format_text import ResponseFormatText
+from ..shared.response_format_json_object import ResponseFormatJSONObject
+from .response_format_text_json_schema_config import ResponseFormatTextJSONSchemaConfig
+
+__all__ = ["ResponseFormatTextConfig"]
+
+ResponseFormatTextConfig: TypeAlias = Annotated[
+    Union[ResponseFormatText, ResponseFormatTextJSONSchemaConfig, ResponseFormatJSONObject],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py
new file mode 100644
index 00000000..fcaf8f3f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_config_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from ..shared_params.response_format_text import ResponseFormatText
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from .response_format_text_json_schema_config_param import ResponseFormatTextJSONSchemaConfigParam
+
+__all__ = ["ResponseFormatTextConfigParam"]
+
+ResponseFormatTextConfigParam: TypeAlias = Union[
+    ResponseFormatText, ResponseFormatTextJSONSchemaConfigParam, ResponseFormatJSONObject
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py
new file mode 100644
index 00000000..3cf06637
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatTextJSONSchemaConfig"]
+
+
+class ResponseFormatTextJSONSchemaConfig(BaseModel):
+    schema_: Dict[str, object] = FieldInfo(alias="schema")
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+
+    type: Literal["json_schema"]
+    """The type of response format being defined. Always `json_schema`."""
+
+    description: Optional[str] = None
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    name: Optional[str] = None
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    strict: Optional[bool] = None
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py
new file mode 100644
index 00000000..211c5d1e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_format_text_json_schema_config_param.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatTextJSONSchemaConfigParam"]
+
+
+class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False):
+    schema: Required[Dict[str, object]]
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+
+    type: Required[Literal["json_schema"]]
+    """The type of response format being defined. Always `json_schema`."""
+
+    description: str
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    name: str
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    strict: Optional[bool]
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py
new file mode 100644
index 00000000..0989b7ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_delta_event.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"]
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
+    delta: str
+    """The function-call arguments delta that is added."""
+
+    item_id: str
+    """The ID of the output item that the function-call arguments delta is added to."""
+
+    output_index: int
+    """
+    The index of the output item that the function-call arguments delta is added to.
+    """
+
+    type: Literal["response.function_call_arguments.delta"]
+    """The type of the event. Always `response.function_call_arguments.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py
new file mode 100644
index 00000000..1d805a57
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_call_arguments_done_event.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionCallArgumentsDoneEvent"]
+
+
+class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
+    arguments: str
+    """The function-call arguments."""
+
+    item_id: str
+    """The ID of the item."""
+
+    output_index: int
+    """The index of the output item."""
+
+    type: Literal["response.function_call_arguments.done"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py
new file mode 100644
index 00000000..2a848220
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionToolCall"]
+
+
+class ResponseFunctionToolCall(BaseModel):
+    arguments: str
+    """A JSON string of the arguments to pass to the function."""
+
+    call_id: str
+    """The unique ID of the function tool call generated by the model."""
+
+    name: str
+    """The name of the function to run."""
+
+    type: Literal["function_call"]
+    """The type of the function tool call. Always `function_call`."""
+
+    id: Optional[str] = None
+    """The unique ID of the function tool call."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py
new file mode 100644
index 00000000..25984f94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_item.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .response_function_tool_call import ResponseFunctionToolCall
+
+__all__ = ["ResponseFunctionToolCallItem"]
+
+
+class ResponseFunctionToolCallItem(ResponseFunctionToolCall):
+    id: str  # type: ignore
+    """The unique ID of the function tool call."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py
new file mode 100644
index 00000000..4c8c41a6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_output_item.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionToolCallOutputItem"]
+
+
+class ResponseFunctionToolCallOutputItem(BaseModel):
+    id: str
+    """The unique ID of the function call tool output."""
+
+    call_id: str
+    """The unique ID of the function tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the function tool call."""
+
+    type: Literal["function_call_output"]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py
new file mode 100644
index 00000000..eaa263cf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_tool_call_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFunctionToolCallParam"]
+
+
+class ResponseFunctionToolCallParam(TypedDict, total=False):
+    arguments: Required[str]
+    """A JSON string of the arguments to pass to the function."""
+
+    call_id: Required[str]
+    """The unique ID of the function tool call generated by the model."""
+
+    name: Required[str]
+    """The name of the function to run."""
+
+    type: Required[Literal["function_call"]]
+    """The type of the function tool call. Always `function_call`."""
+
+    id: str
+    """The unique ID of the function tool call."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py
new file mode 100644
index 00000000..44734b68
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionWebSearch"]
+
+
+class ResponseFunctionWebSearch(BaseModel):
+    id: str
+    """The unique ID of the web search tool call."""
+
+    status: Literal["in_progress", "searching", "completed", "failed"]
+    """The status of the web search tool call."""
+
+    type: Literal["web_search_call"]
+    """The type of the web search tool call. Always `web_search_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py
new file mode 100644
index 00000000..d413e60b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_function_web_search_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFunctionWebSearchParam"]
+
+
+class ResponseFunctionWebSearchParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the web search tool call."""
+
+    status: Required[Literal["in_progress", "searching", "completed", "failed"]]
+    """The status of the web search tool call."""
+
+    type: Required[Literal["web_search_call"]]
+    """The type of the web search tool call. Always `web_search_call`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py
new file mode 100644
index 00000000..7d96cbb8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_in_progress_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseInProgressEvent"]
+
+
+class ResponseInProgressEvent(BaseModel):
+    response: Response
+    """The response that is in progress."""
+
+    type: Literal["response.in_progress"]
+    """The type of the event. Always `response.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py
new file mode 100644
index 00000000..83489fa7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_includable.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ResponseIncludable"]
+
+ResponseIncludable: TypeAlias = Literal[
+    "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url"
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py
new file mode 100644
index 00000000..742b789c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_incomplete_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseIncompleteEvent"]
+
+
+class ResponseIncompleteEvent(BaseModel):
+    response: Response
+    """The response that was incomplete."""
+
+    type: Literal["response.incomplete"]
+    """The type of the event. Always `response.incomplete`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py
new file mode 100644
index 00000000..1726909a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_input_file import ResponseInputFile
+from .response_input_text import ResponseInputText
+from .response_input_image import ResponseInputImage
+
+__all__ = ["ResponseInputContent"]
+
+ResponseInputContent: TypeAlias = Annotated[
+    Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type")
+]
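
A rough sketch, assumed rather than taken from the diff, of how consuming code might narrow this discriminated union: each variant carries a distinct `type` literal, so isinstance checks are sufficient.

from openai.types.responses.response_input_content import ResponseInputContent
from openai.types.responses.response_input_file import ResponseInputFile
from openai.types.responses.response_input_image import ResponseInputImage
from openai.types.responses.response_input_text import ResponseInputText

def describe(part: ResponseInputContent) -> str:
    # Narrow on the concrete variant; matching on `part.type` works as well.
    if isinstance(part, ResponseInputText):
        return f"text: {part.text}"
    if isinstance(part, ResponseInputImage):
        return f"image ({part.detail}): {part.image_url or part.file_id}"
    assert isinstance(part, ResponseInputFile)
    return f"file: {part.filename or part.file_id}"
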
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py
new file mode 100644
index 00000000..7791cdfd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_content_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .response_input_file_param import ResponseInputFileParam
+from .response_input_text_param import ResponseInputTextParam
+from .response_input_image_param import ResponseInputImageParam
+
+__all__ = ["ResponseInputContentParam"]
+
+ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py
new file mode 100644
index 00000000..00b35dc8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputFile"]
+
+
+class ResponseInputFile(BaseModel):
+    type: Literal["input_file"]
+    """The type of the input item. Always `input_file`."""
+
+    file_data: Optional[str] = None
+    """The content of the file to be sent to the model."""
+
+    file_id: Optional[str] = None
+    """The ID of the file to be sent to the model."""
+
+    filename: Optional[str] = None
+    """The name of the file to be sent to the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py
new file mode 100644
index 00000000..dc06a4ea
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_file_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputFileParam"]
+
+
+class ResponseInputFileParam(TypedDict, total=False):
+    type: Required[Literal["input_file"]]
+    """The type of the input item. Always `input_file`."""
+
+    file_data: str
+    """The content of the file to be sent to the model."""
+
+    file_id: str
+    """The ID of the file to be sent to the model."""
+
+    filename: str
+    """The name of the file to be sent to the model."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py
new file mode 100644
index 00000000..d719f44e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputImage"]
+
+
+class ResponseInputImage(BaseModel):
+    detail: Literal["high", "low", "auto"]
+    """The detail level of the image to be sent to the model.
+
+    One of `high`, `low`, or `auto`. Defaults to `auto`.
+    """
+
+    type: Literal["input_image"]
+    """The type of the input item. Always `input_image`."""
+
+    file_id: Optional[str] = None
+    """The ID of the file to be sent to the model."""
+
+    image_url: Optional[str] = None
+    """The URL of the image to be sent to the model.
+
+    A fully qualified URL or base64 encoded image in a data URL.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py
new file mode 100644
index 00000000..5dd4db2b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_image_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputImageParam"]
+
+
+class ResponseInputImageParam(TypedDict, total=False):
+    detail: Required[Literal["high", "low", "auto"]]
+    """The detail level of the image to be sent to the model.
+
+    One of `high`, `low`, or `auto`. Defaults to `auto`.
+    """
+
+    type: Required[Literal["input_image"]]
+    """The type of the input item. Always `input_image`."""
+
+    file_id: Optional[str]
+    """The ID of the file to be sent to the model."""
+
+    image_url: Optional[str]
+    """The URL of the image to be sent to the model.
+
+    A fully qualified URL or base64 encoded image in a data URL.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py
new file mode 100644
index 00000000..2505f7c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_item_param.py
@@ -0,0 +1,131 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .easy_input_message_param import EasyInputMessageParam
+from .response_output_message_param import ResponseOutputMessageParam
+from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_computer_tool_call_param import ResponseComputerToolCallParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
+
+__all__ = [
+    "ResponseInputItemParam",
+    "Message",
+    "ComputerCallOutput",
+    "ComputerCallOutputAcknowledgedSafetyCheck",
+    "FunctionCallOutput",
+    "ItemReference",
+]
+
+
+class Message(TypedDict, total=False):
+    content: Required[ResponseInputMessageContentListParam]
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Required[Literal["user", "system", "developer"]]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always set to `message`."""
+
+
+class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the pending safety check."""
+
+    code: Required[str]
+    """The type of the pending safety check."""
+
+    message: Required[str]
+    """Details about the pending safety check."""
+
+
+class ComputerCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The ID of the computer tool call that produced the output."""
+
+    output: Required[ResponseComputerToolCallOutputScreenshotParam]
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Required[Literal["computer_call_output"]]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    id: str
+    """The ID of the computer tool call output."""
+
+    acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+
+class FunctionCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The unique ID of the function tool call generated by the model."""
+
+    output: Required[str]
+    """A JSON string of the output of the function tool call."""
+
+    type: Required[Literal["function_call_output"]]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    id: str
+    """The unique ID of the function tool call output.
+
+    Populated when this item is returned via API.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+class ItemReference(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the item to reference."""
+
+    type: Required[Literal["item_reference"]]
+    """The type of item to reference. Always `item_reference`."""
+
+
+ResponseInputItemParam: TypeAlias = Union[
+    EasyInputMessageParam,
+    Message,
+    ResponseOutputMessageParam,
+    ResponseFileSearchToolCallParam,
+    ResponseComputerToolCallParam,
+    ComputerCallOutput,
+    ResponseFunctionWebSearchParam,
+    ResponseFunctionToolCallParam,
+    FunctionCallOutput,
+    ResponseReasoningItemParam,
+    ItemReference,
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py
new file mode 100644
index 00000000..99b7c10f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .response_input_content import ResponseInputContent
+
+__all__ = ["ResponseInputMessageContentList"]
+
+ResponseInputMessageContentList: TypeAlias = List[ResponseInputContent]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py
new file mode 100644
index 00000000..080613df
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_content_list_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import TypeAlias
+
+from .response_input_file_param import ResponseInputFileParam
+from .response_input_text_param import ResponseInputTextParam
+from .response_input_image_param import ResponseInputImageParam
+
+__all__ = ["ResponseInputMessageContentListParam", "ResponseInputContentParam"]
+
+ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+
+ResponseInputMessageContentListParam: TypeAlias = List[ResponseInputContentParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py
new file mode 100644
index 00000000..6a788e7f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_message_item.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_input_message_content_list import ResponseInputMessageContentList
+
+__all__ = ["ResponseInputMessageItem"]
+
+
+class ResponseInputMessageItem(BaseModel):
+    id: str
+    """The unique ID of the message input."""
+
+    content: ResponseInputMessageContentList
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Literal["user", "system", "developer"]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always set to `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py
new file mode 100644
index 00000000..84a80eb7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_param.py
@@ -0,0 +1,134 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .easy_input_message_param import EasyInputMessageParam
+from .response_output_message_param import ResponseOutputMessageParam
+from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_computer_tool_call_param import ResponseComputerToolCallParam
+from .response_function_tool_call_param import ResponseFunctionToolCallParam
+from .response_function_web_search_param import ResponseFunctionWebSearchParam
+from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
+
+__all__ = [
+    "ResponseInputParam",
+    "ResponseInputItemParam",
+    "Message",
+    "ComputerCallOutput",
+    "ComputerCallOutputAcknowledgedSafetyCheck",
+    "FunctionCallOutput",
+    "ItemReference",
+]
+
+
+class Message(TypedDict, total=False):
+    content: Required[ResponseInputMessageContentListParam]
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Required[Literal["user", "system", "developer"]]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always set to `message`."""
+
+
+class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the pending safety check."""
+
+    code: Required[str]
+    """The type of the pending safety check."""
+
+    message: Required[str]
+    """Details about the pending safety check."""
+
+
+class ComputerCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The ID of the computer tool call that produced the output."""
+
+    output: Required[ResponseComputerToolCallOutputScreenshotParam]
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Required[Literal["computer_call_output"]]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    id: str
+    """The ID of the computer tool call output."""
+
+    acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+
+class FunctionCallOutput(TypedDict, total=False):
+    call_id: Required[str]
+    """The unique ID of the function tool call generated by the model."""
+
+    output: Required[str]
+    """A JSON string of the output of the function tool call."""
+
+    type: Required[Literal["function_call_output"]]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    id: str
+    """The unique ID of the function tool call output.
+
+    Populated when this item is returned via API.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+class ItemReference(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the item to reference."""
+
+    type: Required[Literal["item_reference"]]
+    """The type of item to reference. Always `item_reference`."""
+
+
+ResponseInputItemParam: TypeAlias = Union[
+    EasyInputMessageParam,
+    Message,
+    ResponseOutputMessageParam,
+    ResponseFileSearchToolCallParam,
+    ResponseComputerToolCallParam,
+    ComputerCallOutput,
+    ResponseFunctionWebSearchParam,
+    ResponseFunctionToolCallParam,
+    FunctionCallOutput,
+    ResponseReasoningItemParam,
+    ItemReference,
+]
+
+ResponseInputParam: TypeAlias = List[ResponseInputItemParam]
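
A hedged sketch, not part of the diff (every ID and value is invented), of a structured input list for a follow-up turn that echoes a model-generated function call and supplies its output.

from openai.types.responses.response_input_param import ResponseInputParam

conversation: ResponseInputParam = [
    {"type": "message", "role": "user", "content": "What's the weather in Paris?"},
    {
        "type": "function_call",          # the call produced by the model
        "call_id": "call_123",
        "name": "get_weather",
        "arguments": '{"city": "Paris"}',
    },
    {
        "type": "function_call_output",   # the tool result being fed back
        "call_id": "call_123",
        "output": '{"temperature_c": 18}',
    },
]
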
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py
new file mode 100644
index 00000000..ba8d1ea1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputText"]
+
+
+class ResponseInputText(BaseModel):
+    text: str
+    """The text input to the model."""
+
+    type: Literal["input_text"]
+    """The type of the input item. Always `input_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py
new file mode 100644
index 00000000..f2ba8340
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_input_text_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputTextParam"]
+
+
+class ResponseInputTextParam(TypedDict, total=False):
+    text: Required[str]
+    """The text input to the model."""
+
+    type: Required[Literal["input_text"]]
+    """The type of the input item. Always `input_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item.py
new file mode 100644
index 00000000..dc8d67d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_output_message import ResponseOutputMessage
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_input_message_item import ResponseInputMessageItem
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_function_tool_call_item import ResponseFunctionToolCallItem
+from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
+
+__all__ = ["ResponseItem"]
+
+ResponseItem: TypeAlias = Annotated[
+    Union[
+        ResponseInputMessageItem,
+        ResponseOutputMessage,
+        ResponseFileSearchToolCall,
+        ResponseComputerToolCall,
+        ResponseComputerToolCallOutputItem,
+        ResponseFunctionWebSearch,
+        ResponseFunctionToolCallItem,
+        ResponseFunctionToolCallOutputItem,
+    ],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py
new file mode 100644
index 00000000..b43eacdb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_item_list.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_item import ResponseItem
+
+__all__ = ["ResponseItemList"]
+
+
+class ResponseItemList(BaseModel):
+    data: List[ResponseItem]
+    """A list of items used to generate this response."""
+
+    first_id: str
+    """The ID of the first item in the list."""
+
+    has_more: bool
+    """Whether there are more items available."""
+
+    last_id: str
+    """The ID of the last item in the list."""
+
+    object: Literal["list"]
+    """The type of object returned, must be `list`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py
new file mode 100644
index 00000000..f1e96931
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_output_message import ResponseOutputMessage
+from .response_reasoning_item import ResponseReasoningItem
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_function_tool_call import ResponseFunctionToolCall
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+
+__all__ = ["ResponseOutputItem"]
+
+ResponseOutputItem: TypeAlias = Annotated[
+    Union[
+        ResponseOutputMessage,
+        ResponseFileSearchToolCall,
+        ResponseFunctionToolCall,
+        ResponseFunctionWebSearch,
+        ResponseComputerToolCall,
+        ResponseReasoningItem,
+    ],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py
new file mode 100644
index 00000000..7344fb9a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_added_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_output_item import ResponseOutputItem
+
+__all__ = ["ResponseOutputItemAddedEvent"]
+
+
+class ResponseOutputItemAddedEvent(BaseModel):
+    item: ResponseOutputItem
+    """The output item that was added."""
+
+    output_index: int
+    """The index of the output item that was added."""
+
+    type: Literal["response.output_item.added"]
+    """The type of the event. Always `response.output_item.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py
new file mode 100644
index 00000000..a0a871a0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_item_done_event.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_output_item import ResponseOutputItem
+
+__all__ = ["ResponseOutputItemDoneEvent"]
+
+
+class ResponseOutputItemDoneEvent(BaseModel):
+    item: ResponseOutputItem
+    """The output item that was marked done."""
+
+    output_index: int
+    """The index of the output item that was marked done."""
+
+    type: Literal["response.output_item.done"]
+    """The type of the event. Always `response.output_item.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py
new file mode 100644
index 00000000..3864aa21
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .response_output_text import ResponseOutputText
+from .response_output_refusal import ResponseOutputRefusal
+
+__all__ = ["ResponseOutputMessage", "Content"]
+
+Content: TypeAlias = Annotated[Union[ResponseOutputText, ResponseOutputRefusal], PropertyInfo(discriminator="type")]
+
+
+class ResponseOutputMessage(BaseModel):
+    id: str
+    """The unique ID of the output message."""
+
+    content: List[Content]
+    """The content of the output message."""
+
+    role: Literal["assistant"]
+    """The role of the output message. Always `assistant`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+    type: Literal["message"]
+    """The type of the output message. Always `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py
new file mode 100644
index 00000000..46cbbd20
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_message_param.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .response_output_text_param import ResponseOutputTextParam
+from .response_output_refusal_param import ResponseOutputRefusalParam
+
+__all__ = ["ResponseOutputMessageParam", "Content"]
+
+Content: TypeAlias = Union[ResponseOutputTextParam, ResponseOutputRefusalParam]
+
+
+class ResponseOutputMessageParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the output message."""
+
+    content: Required[Iterable[Content]]
+    """The content of the output message."""
+
+    role: Required[Literal["assistant"]]
+    """The role of the output message. Always `assistant`."""
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+    type: Required[Literal["message"]]
+    """The type of the output message. Always `message`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py
new file mode 100644
index 00000000..eba58107
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseOutputRefusal"]
+
+
+class ResponseOutputRefusal(BaseModel):
+    refusal: str
+    """The refusal explanationfrom the model."""
+
+    type: Literal["refusal"]
+    """The type of the refusal. Always `refusal`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py
new file mode 100644
index 00000000..53140a60
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseOutputRefusalParam"]
+
+
+class ResponseOutputRefusalParam(TypedDict, total=False):
+    refusal: Required[str]
+    """The refusal explanationfrom the model."""
+
+    type: Required[Literal["refusal"]]
+    """The type of the refusal. Always `refusal`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py
new file mode 100644
index 00000000..fa653cd1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text.py
@@ -0,0 +1,64 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["ResponseOutputText", "Annotation", "AnnotationFileCitation", "AnnotationURLCitation", "AnnotationFilePath"]
+
+
+class AnnotationFileCitation(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_citation"]
+    """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: int
+    """The index of the first character of the URL citation in the message."""
+
+    title: str
+    """The title of the web resource."""
+
+    type: Literal["url_citation"]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: str
+    """The URL of the web resource."""
+
+
+class AnnotationFilePath(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_path"]
+    """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Annotated[
+    Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+]
+
+
+class ResponseOutputText(BaseModel):
+    annotations: List[Annotation]
+    """The annotations of the text output."""
+
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py
new file mode 100644
index 00000000..1f096728
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_output_text_param.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "ResponseOutputTextParam",
+    "Annotation",
+    "AnnotationFileCitation",
+    "AnnotationURLCitation",
+    "AnnotationFilePath",
+]
+
+
+class AnnotationFileCitation(TypedDict, total=False):
+    file_id: Required[str]
+    """The ID of the file."""
+
+    index: Required[int]
+    """The index of the file in the list of files."""
+
+    type: Required[Literal["file_citation"]]
+    """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(TypedDict, total=False):
+    end_index: Required[int]
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: Required[int]
+    """The index of the first character of the URL citation in the message."""
+
+    title: Required[str]
+    """The title of the web resource."""
+
+    type: Required[Literal["url_citation"]]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: Required[str]
+    """The URL of the web resource."""
+
+
+class AnnotationFilePath(TypedDict, total=False):
+    file_id: Required[str]
+    """The ID of the file."""
+
+    index: Required[int]
+    """The index of the file in the list of files."""
+
+    type: Required[Literal["file_path"]]
+    """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath]
+
+
+class ResponseOutputTextParam(TypedDict, total=False):
+    annotations: Required[Iterable[Annotation]]
+    """The annotations of the text output."""
+
+    text: Required[str]
+    """The text output from the model."""
+
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py
new file mode 100644
index 00000000..57e5fbfe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningItem", "Summary"]
+
+
+class Summary(BaseModel):
+    text: str
+    """
+    A short summary of the reasoning used by the model when generating the response.
+    """
+
+    type: Literal["summary_text"]
+    """The type of the object. Always `summary_text`."""
+
+
+class ResponseReasoningItem(BaseModel):
+    id: str
+    """The unique identifier of the reasoning content."""
+
+    summary: List[Summary]
+    """Reasoning text contents."""
+
+    type: Literal["reasoning"]
+    """The type of the object. Always `reasoning`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py
new file mode 100644
index 00000000..adb49d64
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_reasoning_item_param.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseReasoningItemParam", "Summary"]
+
+
+class Summary(TypedDict, total=False):
+    text: Required[str]
+    """
+    A short summary of the reasoning used by the model when generating the response.
+    """
+
+    type: Required[Literal["summary_text"]]
+    """The type of the object. Always `summary_text`."""
+
+
+class ResponseReasoningItemParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique identifier of the reasoning content."""
+
+    summary: Required[Iterable[Summary]]
+    """Reasoning text contents."""
+
+    type: Required[Literal["reasoning"]]
+    """The type of the object. Always `reasoning`."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py
new file mode 100644
index 00000000..04dcdf1c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseRefusalDeltaEvent"]
+
+
+class ResponseRefusalDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the content part that the refusal text is added to."""
+
+    delta: str
+    """The refusal text that is added."""
+
+    item_id: str
+    """The ID of the output item that the refusal text is added to."""
+
+    output_index: int
+    """The index of the output item that the refusal text is added to."""
+
+    type: Literal["response.refusal.delta"]
+    """The type of the event. Always `response.refusal.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py
new file mode 100644
index 00000000..a9b6f4b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_refusal_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseRefusalDoneEvent"]
+
+
+class ResponseRefusalDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part in which the refusal text is finalized."""
+
+    item_id: str
+    """The ID of the output item in which the refusal text is finalized."""
+
+    output_index: int
+    """The index of the output item in which the refusal text is finalized."""
+
+    refusal: str
+    """The refusal text that is finalized."""
+
+    type: Literal["response.refusal.done"]
+    """The type of the event. Always `response.refusal.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py
new file mode 100644
index 00000000..137bf4dc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_retrieve_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import TypedDict
+
+from .response_includable import ResponseIncludable
+
+__all__ = ["ResponseRetrieveParams"]
+
+
+class ResponseRetrieveParams(TypedDict, total=False):
+    include: List[ResponseIncludable]
+    """Additional fields to include in the response.
+
+    See the `include` parameter for Response creation above for more information.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_status.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_status.py
new file mode 100644
index 00000000..934d17cd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_status.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ResponseStatus"]
+
+ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "incomplete"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py
new file mode 100644
index 00000000..446863b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_stream_event.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_error_event import ResponseErrorEvent
+from .response_failed_event import ResponseFailedEvent
+from .response_created_event import ResponseCreatedEvent
+from .response_completed_event import ResponseCompletedEvent
+from .response_text_done_event import ResponseTextDoneEvent
+from .response_audio_done_event import ResponseAudioDoneEvent
+from .response_incomplete_event import ResponseIncompleteEvent
+from .response_text_delta_event import ResponseTextDeltaEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent
+from .response_in_progress_event import ResponseInProgressEvent
+from .response_refusal_done_event import ResponseRefusalDoneEvent
+from .response_refusal_delta_event import ResponseRefusalDeltaEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
+from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent
+from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
+from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent
+from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
+from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
+from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
+from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
+from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
+from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
+from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent
+from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent
+from .response_code_interpreter_call_code_delta_event import ResponseCodeInterpreterCallCodeDeltaEvent
+from .response_code_interpreter_call_in_progress_event import ResponseCodeInterpreterCallInProgressEvent
+from .response_code_interpreter_call_interpreting_event import ResponseCodeInterpreterCallInterpretingEvent
+
+__all__ = ["ResponseStreamEvent"]
+
+ResponseStreamEvent: TypeAlias = Annotated[
+    Union[
+        ResponseAudioDeltaEvent,
+        ResponseAudioDoneEvent,
+        ResponseAudioTranscriptDeltaEvent,
+        ResponseAudioTranscriptDoneEvent,
+        ResponseCodeInterpreterCallCodeDeltaEvent,
+        ResponseCodeInterpreterCallCodeDoneEvent,
+        ResponseCodeInterpreterCallCompletedEvent,
+        ResponseCodeInterpreterCallInProgressEvent,
+        ResponseCodeInterpreterCallInterpretingEvent,
+        ResponseCompletedEvent,
+        ResponseContentPartAddedEvent,
+        ResponseContentPartDoneEvent,
+        ResponseCreatedEvent,
+        ResponseErrorEvent,
+        ResponseFileSearchCallCompletedEvent,
+        ResponseFileSearchCallInProgressEvent,
+        ResponseFileSearchCallSearchingEvent,
+        ResponseFunctionCallArgumentsDeltaEvent,
+        ResponseFunctionCallArgumentsDoneEvent,
+        ResponseInProgressEvent,
+        ResponseFailedEvent,
+        ResponseIncompleteEvent,
+        ResponseOutputItemAddedEvent,
+        ResponseOutputItemDoneEvent,
+        ResponseRefusalDeltaEvent,
+        ResponseRefusalDoneEvent,
+        ResponseTextAnnotationDeltaEvent,
+        ResponseTextDeltaEvent,
+        ResponseTextDoneEvent,
+        ResponseWebSearchCallCompletedEvent,
+        ResponseWebSearchCallInProgressEvent,
+        ResponseWebSearchCallSearchingEvent,
+    ],
+    PropertyInfo(discriminator="type"),
+]
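
A hedged sketch of consuming this event union while streaming, assuming a client configured with an API key; the model name is only an example.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

stream = client.responses.create(model="gpt-4o", input="Say hello", stream=True)
for event in stream:
    # Dispatch on the discriminator `type` carried by every event variant.
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)  # incremental text
    elif event.type == "response.completed":
        print()  # the final Response is available as event.response
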
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py
new file mode 100644
index 00000000..4f258228
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_annotation_delta_event.py
@@ -0,0 +1,79 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+    "ResponseTextAnnotationDeltaEvent",
+    "Annotation",
+    "AnnotationFileCitation",
+    "AnnotationURLCitation",
+    "AnnotationFilePath",
+]
+
+
+class AnnotationFileCitation(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_citation"]
+    """The type of the file citation. Always `file_citation`."""
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: int
+    """The index of the first character of the URL citation in the message."""
+
+    title: str
+    """The title of the web resource."""
+
+    type: Literal["url_citation"]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: str
+    """The URL of the web resource."""
+
+
+class AnnotationFilePath(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_path"]
+    """The type of the file path. Always `file_path`."""
+
+
+Annotation: TypeAlias = Annotated[
+    Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+]
+
+
+class ResponseTextAnnotationDeltaEvent(BaseModel):
+    annotation: Annotation
+    """A citation to a file."""
+
+    annotation_index: int
+    """The index of the annotation that was added."""
+
+    content_index: int
+    """The index of the content part that the text annotation was added to."""
+
+    item_id: str
+    """The ID of the output item that the text annotation was added to."""
+
+    output_index: int
+    """The index of the output item that the text annotation was added to."""
+
+    type: Literal["response.output_text.annotation.added"]
+    """The type of the event. Always `response.output_text.annotation.added`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py
new file mode 100644
index 00000000..a1894a91
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .response_format_text_config import ResponseFormatTextConfig
+
+__all__ = ["ResponseTextConfig"]
+
+
+class ResponseTextConfig(BaseModel):
+    format: Optional[ResponseFormatTextConfig] = None
+    """An object specifying the format that the model must output.
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py
new file mode 100644
index 00000000..aec064bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_config_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .response_format_text_config_param import ResponseFormatTextConfigParam
+
+__all__ = ["ResponseTextConfigParam"]
+
+
+class ResponseTextConfigParam(TypedDict, total=False):
+    format: ResponseFormatTextConfigParam
+    """An object specifying the format that the model must output.
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py
new file mode 100644
index 00000000..751a5e2a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseTextDeltaEvent"]
+
+
+class ResponseTextDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the content part that the text delta was added to."""
+
+    delta: str
+    """The text delta that was added."""
+
+    item_id: str
+    """The ID of the output item that the text delta was added to."""
+
+    output_index: int
+    """The index of the output item that the text delta was added to."""
+
+    type: Literal["response.output_text.delta"]
+    """The type of the event. Always `response.output_text.delta`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py
new file mode 100644
index 00000000..9b5c5e02
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_text_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseTextDoneEvent"]
+
+
+class ResponseTextDoneEvent(BaseModel):
+    content_index: int
+    """The index of the content part in which the text content is finalized."""
+
+    item_id: str
+    """The ID of the output item in which the text content is finalized."""
+
+    output_index: int
+    """The index of the output item in which the text content is finalized."""
+
+    text: str
+    """The text content that is finalized."""
+
+    type: Literal["response.output_text.done"]
+    """The type of the event. Always `response.output_text.done`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py
new file mode 100644
index 00000000..9ad36bd3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_usage.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"]
+
+
+class InputTokensDetails(BaseModel):
+    cached_tokens: int
+    """The number of tokens that were retrieved from the cache.
+
+    [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+    """
+
+
+class OutputTokensDetails(BaseModel):
+    reasoning_tokens: int
+    """The number of reasoning tokens."""
+
+
+class ResponseUsage(BaseModel):
+    input_tokens: int
+    """The number of input tokens."""
+
+    input_tokens_details: InputTokensDetails
+    """A detailed breakdown of the input tokens."""
+
+    output_tokens: int
+    """The number of output tokens."""
+
+    output_tokens_details: OutputTokensDetails
+    """A detailed breakdown of the output tokens."""
+
+    total_tokens: int
+    """The total number of tokens used."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py
new file mode 100644
index 00000000..76f26766
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_completed_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallCompletedEvent"]
+
+
+class ResponseWebSearchCallCompletedEvent(BaseModel):
+    item_id: str
+    """Unique ID for the output item associated with the web search call."""
+
+    output_index: int
+    """The index of the output item that the web search call is associated with."""
+
+    type: Literal["response.web_search_call.completed"]
+    """The type of the event. Always `response.web_search_call.completed`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py
new file mode 100644
index 00000000..681ce6d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_in_progress_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallInProgressEvent"]
+
+
+class ResponseWebSearchCallInProgressEvent(BaseModel):
+    item_id: str
+    """Unique ID for the output item associated with the web search call."""
+
+    output_index: int
+    """The index of the output item that the web search call is associated with."""
+
+    type: Literal["response.web_search_call.in_progress"]
+    """The type of the event. Always `response.web_search_call.in_progress`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py
new file mode 100644
index 00000000..c885d989
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/response_web_search_call_searching_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseWebSearchCallSearchingEvent"]
+
+
+class ResponseWebSearchCallSearchingEvent(BaseModel):
+    item_id: str
+    """Unique ID for the output item associated with the web search call."""
+
+    output_index: int
+    """The index of the output item that the web search call is associated with."""
+
+    type: Literal["response.web_search_call.searching"]
+    """The type of the event. Always `response.web_search_call.searching`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool.py
new file mode 100644
index 00000000..de5d5524
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .computer_tool import ComputerTool
+from .function_tool import FunctionTool
+from .web_search_tool import WebSearchTool
+from .file_search_tool import FileSearchTool
+
+__all__ = ["Tool"]
+
+Tool: TypeAlias = Annotated[
+    Union[FileSearchTool, FunctionTool, ComputerTool, WebSearchTool], PropertyInfo(discriminator="type")
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py
new file mode 100644
index 00000000..8d2a4f28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceFunction"]
+
+
+class ToolChoiceFunction(BaseModel):
+    name: str
+    """The name of the function to call."""
+
+    type: Literal["function"]
+    """For function calling, the type is always `function`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py
new file mode 100644
index 00000000..910537fd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_function_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceFunctionParam"]
+
+
+class ToolChoiceFunctionParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
+
+    type: Required[Literal["function"]]
+    """For function calling, the type is always `function`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py
new file mode 100644
index 00000000..c200db54
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_options.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ToolChoiceOptions"]
+
+ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py
new file mode 100644
index 00000000..4942808f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceTypes"]
+
+
+class ToolChoiceTypes(BaseModel):
+    type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    """The type of hosted tool the model should to use.
+
+    Learn more about
+    [built-in tools](https://platform.openai.com/docs/guides/tools).
+
+    Allowed values are:
+
+    - `file_search`
+    - `web_search_preview`
+    - `computer_use_preview`
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py
new file mode 100644
index 00000000..b14f2a9e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_choice_types_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceTypesParam"]
+
+
+class ToolChoiceTypesParam(TypedDict, total=False):
+    type: Required[
+        Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    ]
+    """The type of hosted tool the model should to use.
+
+    Learn more about
+    [built-in tools](https://platform.openai.com/docs/guides/tools).
+
+    Allowed values are:
+
+    - `file_search`
+    - `web_search_preview`
+    - `computer_use_preview`
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py
new file mode 100644
index 00000000..be1cf824
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/tool_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .computer_tool_param import ComputerToolParam
+from .function_tool_param import FunctionToolParam
+from .web_search_tool_param import WebSearchToolParam
+from .file_search_tool_param import FileSearchToolParam
+from ..chat.chat_completion_tool_param import ChatCompletionToolParam
+
+__all__ = ["ToolParam"]
+
+ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, ComputerToolParam, WebSearchToolParam]
+
+ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py
new file mode 100644
index 00000000..bee270bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["WebSearchTool", "UserLocation"]
+
+
+class UserLocation(BaseModel):
+    type: Literal["approximate"]
+    """The type of location approximation. Always `approximate`."""
+
+    city: Optional[str] = None
+    """Free text input for the city of the user, e.g. `San Francisco`."""
+
+    country: Optional[str] = None
+    """
+    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+    the user, e.g. `US`.
+    """
+
+    region: Optional[str] = None
+    """Free text input for the region of the user, e.g. `California`."""
+
+    timezone: Optional[str] = None
+    """
+    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+    user, e.g. `America/Los_Angeles`.
+    """
+
+
+class WebSearchTool(BaseModel):
+    type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
+    """The type of the web search tool. One of:
+
+    - `web_search_preview`
+    - `web_search_preview_2025_03_11`
+    """
+
+    search_context_size: Optional[Literal["low", "medium", "high"]] = None
+    """
+    High-level guidance for the amount of context window space to use for the
+    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """
+
+    user_location: Optional[UserLocation] = None
diff --git a/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py
new file mode 100644
index 00000000..8ee36ffb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/responses/web_search_tool_param.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["WebSearchToolParam", "UserLocation"]
+
+
+class UserLocation(TypedDict, total=False):
+    type: Required[Literal["approximate"]]
+    """The type of location approximation. Always `approximate`."""
+
+    city: str
+    """Free text input for the city of the user, e.g. `San Francisco`."""
+
+    country: str
+    """
+    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+    the user, e.g. `US`.
+    """
+
+    region: str
+    """Free text input for the region of the user, e.g. `California`."""
+
+    timezone: str
+    """
+    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+    user, e.g. `America/Los_Angeles`.
+    """
+
+
+class WebSearchToolParam(TypedDict, total=False):
+    type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
+    """The type of the web search tool. One of:
+
+    - `web_search_preview`
+    - `web_search_preview_2025_03_11`
+    """
+
+    search_context_size: Literal["low", "medium", "high"]
+    """
+    High-level guidance for the amount of context window space to use for the
+    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """
+
+    user_location: Optional[UserLocation]
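Since WebSearchToolParam is a plain TypedDict, a request-side tool entry is just a dict literal with these keys; the location values below are only an example.

    from openai.types.responses.web_search_tool_param import WebSearchToolParam

    web_search: WebSearchToolParam = {
        "type": "web_search_preview",
        "search_context_size": "medium",  # one of "low", "medium", "high"
        "user_location": {
            "type": "approximate",
            "city": "San Francisco",
            "country": "US",
            "region": "California",
            "timezone": "America/Los_Angeles",
        },
    }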
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/shared/__init__.py
new file mode 100644
index 00000000..6ad0ed5e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/__init__.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .metadata import Metadata as Metadata
+from .reasoning import Reasoning as Reasoning
+from .all_models import AllModels as AllModels
+from .chat_model import ChatModel as ChatModel
+from .error_object import ErrorObject as ErrorObject
+from .compound_filter import CompoundFilter as CompoundFilter
+from .responses_model import ResponsesModel as ResponsesModel
+from .reasoning_effort import ReasoningEffort as ReasoningEffort
+from .comparison_filter import ComparisonFilter as ComparisonFilter
+from .function_definition import FunctionDefinition as FunctionDefinition
+from .function_parameters import FunctionParameters as FunctionParameters
+from .response_format_text import ResponseFormatText as ResponseFormatText
+from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
+from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/all_models.py b/.venv/lib/python3.12/site-packages/openai/types/shared/all_models.py
new file mode 100644
index 00000000..db841077
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/all_models.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .chat_model import ChatModel
+
+__all__ = ["AllModels"]
+
+AllModels: TypeAlias = Union[
+    str, ChatModel, Literal["o1-pro", "o1-pro-2025-03-19", "computer-use-preview", "computer-use-preview-2025-03-11"]
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py
new file mode 100644
index 00000000..b1937572
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/chat_model.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatModel"]
+
+ChatModel: TypeAlias = Literal[
+    "o3-mini",
+    "o3-mini-2025-01-31",
+    "o1",
+    "o1-2024-12-17",
+    "o1-preview",
+    "o1-preview-2024-09-12",
+    "o1-mini",
+    "o1-mini-2024-09-12",
+    "gpt-4o",
+    "gpt-4o-2024-11-20",
+    "gpt-4o-2024-08-06",
+    "gpt-4o-2024-05-13",
+    "gpt-4o-audio-preview",
+    "gpt-4o-audio-preview-2024-10-01",
+    "gpt-4o-audio-preview-2024-12-17",
+    "gpt-4o-mini-audio-preview",
+    "gpt-4o-mini-audio-preview-2024-12-17",
+    "gpt-4o-search-preview",
+    "gpt-4o-mini-search-preview",
+    "gpt-4o-search-preview-2025-03-11",
+    "gpt-4o-mini-search-preview-2025-03-11",
+    "chatgpt-4o-latest",
+    "gpt-4o-mini",
+    "gpt-4o-mini-2024-07-18",
+    "gpt-4-turbo",
+    "gpt-4-turbo-2024-04-09",
+    "gpt-4-0125-preview",
+    "gpt-4-turbo-preview",
+    "gpt-4-1106-preview",
+    "gpt-4-vision-preview",
+    "gpt-4",
+    "gpt-4-0314",
+    "gpt-4-0613",
+    "gpt-4-32k",
+    "gpt-4-32k-0314",
+    "gpt-4-32k-0613",
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-16k",
+    "gpt-3.5-turbo-0301",
+    "gpt-3.5-turbo-0613",
+    "gpt-3.5-turbo-1106",
+    "gpt-3.5-turbo-0125",
+    "gpt-3.5-turbo-16k-0613",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py
new file mode 100644
index 00000000..2ec2651f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/comparison_filter.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComparisonFilter"]
+
+
+class ComparisonFilter(BaseModel):
+    key: str
+    """The key to compare against the value."""
+
+    type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
+    """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+
+    - `eq`: equals
+    - `ne`: not equal
+    - `gt`: greater than
+    - `gte`: greater than or equal
+    - `lt`: less than
+    - `lte`: less than or equal
+    """
+
+    value: Union[str, float, bool]
+    """
+    The value to compare against the attribute key; supports string, number, or
+    boolean types.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py
new file mode 100644
index 00000000..3aefa436
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/compound_filter.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from .comparison_filter import ComparisonFilter
+
+__all__ = ["CompoundFilter", "Filter"]
+
+Filter: TypeAlias = Union[ComparisonFilter, object]
+
+
+class CompoundFilter(BaseModel):
+    filters: List[Filter]
+    """Array of filters to combine.
+
+    Items can be `ComparisonFilter` or `CompoundFilter`.
+    """
+
+    type: Literal["and", "or"]
+    """Type of operation: `and` or `or`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/error_object.py b/.venv/lib/python3.12/site-packages/openai/types/shared/error_object.py
new file mode 100644
index 00000000..32d7045e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/error_object.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ErrorObject"]
+
+
+class ErrorObject(BaseModel):
+    code: Optional[str] = None
+
+    message: str
+
+    param: Optional[str] = None
+
+    type: str
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py b/.venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py
new file mode 100644
index 00000000..06baa231
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/function_definition.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .function_parameters import FunctionParameters
+
+__all__ = ["FunctionDefinition"]
+
+
+class FunctionDefinition(BaseModel):
+    name: str
+    """The name of the function to be called.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    description: Optional[str] = None
+    """
+    A description of what the function does, used by the model to choose when and
+    how to call the function.
+    """
+
+    parameters: Optional[FunctionParameters] = None
+    """The parameters the functions accepts, described as a JSON Schema object.
+
+    See the [guide](https://platform.openai.com/docs/guides/function-calling) for
+    examples, and the
+    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+    documentation about the format.
+
+    Omitting `parameters` defines a function with an empty parameter list.
+    """
+
+    strict: Optional[bool] = None
+    """Whether to enable strict schema adherence when generating the function call.
+
+    If set to true, the model will follow the exact schema defined in the
+    `parameters` field. Only a subset of JSON Schema is supported when `strict` is
+    `true`. Learn more about Structured Outputs in the
+    [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py b/.venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py
new file mode 100644
index 00000000..a3d83e34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/function_parameters.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["FunctionParameters"]
+
+FunctionParameters: TypeAlias = Dict[str, object]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/metadata.py b/.venv/lib/python3.12/site-packages/openai/types/shared/metadata.py
new file mode 100644
index 00000000..0da88c67
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/metadata.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["Metadata"]
+
+Metadata: TypeAlias = Dict[str, str]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py
new file mode 100644
index 00000000..78a396d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .reasoning_effort import ReasoningEffort
+
+__all__ = ["Reasoning"]
+
+
+class Reasoning(BaseModel):
+    effort: Optional[ReasoningEffort] = None
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    generate_summary: Optional[Literal["concise", "detailed"]] = None
+    """**computer_use_preview only**
+
+    A summary of the reasoning performed by the model. This can be useful for
+    debugging and understanding the model's reasoning process. One of `concise` or
+    `detailed`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py
new file mode 100644
index 00000000..ace21b67
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/reasoning_effort.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ReasoningEffort"]
+
+ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py
new file mode 100644
index 00000000..2aaa5dbd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatJSONObject"]
+
+
+class ResponseFormatJSONObject(BaseModel):
+    type: Literal["json_object"]
+    """The type of response format being defined. Always `json_object`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py
new file mode 100644
index 00000000..c7924446
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_json_schema.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatJSONSchema", "JSONSchema"]
+
+
+class JSONSchema(BaseModel):
+    name: str
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    description: Optional[str] = None
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+
+    strict: Optional[bool] = None
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
+
+
+class ResponseFormatJSONSchema(BaseModel):
+    json_schema: JSONSchema
+    """Structured Outputs configuration options, including a JSON Schema."""
+
+    type: Literal["json_schema"]
+    """The type of response format being defined. Always `json_schema`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py
new file mode 100644
index 00000000..f0c8cfb7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/response_format_text.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatText"]
+
+
+class ResponseFormatText(BaseModel):
+    type: Literal["text"]
+    """The type of response format being defined. Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py
new file mode 100644
index 00000000..85f154fd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared/responses_model.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .chat_model import ChatModel
+
+__all__ = ["ResponsesModel"]
+
+ResponsesModel: TypeAlias = Union[
+    str, ChatModel, Literal["o1-pro", "o1-pro-2025-03-19", "computer-use-preview", "computer-use-preview-2025-03-11"]
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py
new file mode 100644
index 00000000..88947108
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .metadata import Metadata as Metadata
+from .reasoning import Reasoning as Reasoning
+from .chat_model import ChatModel as ChatModel
+from .compound_filter import CompoundFilter as CompoundFilter
+from .responses_model import ResponsesModel as ResponsesModel
+from .reasoning_effort import ReasoningEffort as ReasoningEffort
+from .comparison_filter import ComparisonFilter as ComparisonFilter
+from .function_definition import FunctionDefinition as FunctionDefinition
+from .function_parameters import FunctionParameters as FunctionParameters
+from .response_format_text import ResponseFormatText as ResponseFormatText
+from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
+from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py
new file mode 100644
index 00000000..ff81b07a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/chat_model.py
@@ -0,0 +1,53 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatModel"]
+
+ChatModel: TypeAlias = Literal[
+    "o3-mini",
+    "o3-mini-2025-01-31",
+    "o1",
+    "o1-2024-12-17",
+    "o1-preview",
+    "o1-preview-2024-09-12",
+    "o1-mini",
+    "o1-mini-2024-09-12",
+    "gpt-4o",
+    "gpt-4o-2024-11-20",
+    "gpt-4o-2024-08-06",
+    "gpt-4o-2024-05-13",
+    "gpt-4o-audio-preview",
+    "gpt-4o-audio-preview-2024-10-01",
+    "gpt-4o-audio-preview-2024-12-17",
+    "gpt-4o-mini-audio-preview",
+    "gpt-4o-mini-audio-preview-2024-12-17",
+    "gpt-4o-search-preview",
+    "gpt-4o-mini-search-preview",
+    "gpt-4o-search-preview-2025-03-11",
+    "gpt-4o-mini-search-preview-2025-03-11",
+    "chatgpt-4o-latest",
+    "gpt-4o-mini",
+    "gpt-4o-mini-2024-07-18",
+    "gpt-4-turbo",
+    "gpt-4-turbo-2024-04-09",
+    "gpt-4-0125-preview",
+    "gpt-4-turbo-preview",
+    "gpt-4-1106-preview",
+    "gpt-4-vision-preview",
+    "gpt-4",
+    "gpt-4-0314",
+    "gpt-4-0613",
+    "gpt-4-32k",
+    "gpt-4-32k-0314",
+    "gpt-4-32k-0613",
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-16k",
+    "gpt-3.5-turbo-0301",
+    "gpt-3.5-turbo-0613",
+    "gpt-3.5-turbo-1106",
+    "gpt-3.5-turbo-0125",
+    "gpt-3.5-turbo-16k-0613",
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py
new file mode 100644
index 00000000..38edd315
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/comparison_filter.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ComparisonFilter"]
+
+
+class ComparisonFilter(TypedDict, total=False):
+    key: Required[str]
+    """The key to compare against the value."""
+
+    type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
+    """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+
+    - `eq`: equals
+    - `ne`: not equal
+    - `gt`: greater than
+    - `gte`: greater than or equal
+    - `lt`: less than
+    - `lte`: less than or equal
+    """
+
+    value: Required[Union[str, float, bool]]
+    """
+    The value to compare against the attribute key; supports string, number, or
+    boolean types.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py
new file mode 100644
index 00000000..d12e9b1b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/compound_filter.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .comparison_filter import ComparisonFilter
+
+__all__ = ["CompoundFilter", "Filter"]
+
+Filter: TypeAlias = Union[ComparisonFilter, object]
+
+
+class CompoundFilter(TypedDict, total=False):
+    filters: Required[Iterable[Filter]]
+    """Array of filters to combine.
+
+    Items can be `ComparisonFilter` or `CompoundFilter`.
+    """
+
+    type: Required[Literal["and", "or"]]
+    """Type of operation: `and` or `or`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py
new file mode 100644
index 00000000..d45ec13f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_definition.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from .function_parameters import FunctionParameters
+
+__all__ = ["FunctionDefinition"]
+
+
+class FunctionDefinition(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to be called.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    description: str
+    """
+    A description of what the function does, used by the model to choose when and
+    how to call the function.
+    """
+
+    parameters: FunctionParameters
+    """The parameters the functions accepts, described as a JSON Schema object.
+
+    See the [guide](https://platform.openai.com/docs/guides/function-calling) for
+    examples, and the
+    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+    documentation about the format.
+
+    Omitting `parameters` defines a function with an empty parameter list.
+    """
+
+    strict: Optional[bool]
+    """Whether to enable strict schema adherence when generating the function call.
+
+    If set to true, the model will follow the exact schema defined in the
+    `parameters` field. Only a subset of JSON Schema is supported when `strict` is
+    `true`. Learn more about Structured Outputs in the
+    [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py
new file mode 100644
index 00000000..45fc742d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/function_parameters.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["FunctionParameters"]
+
+FunctionParameters: TypeAlias = Dict[str, object]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py
new file mode 100644
index 00000000..821650b4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/metadata.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["Metadata"]
+
+Metadata: TypeAlias = Dict[str, str]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py
new file mode 100644
index 00000000..2953b895
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypedDict
+
+from ..shared.reasoning_effort import ReasoningEffort
+
+__all__ = ["Reasoning"]
+
+
+class Reasoning(TypedDict, total=False):
+    effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    generate_summary: Optional[Literal["concise", "detailed"]]
+    """**computer_use_preview only**
+
+    A summary of the reasoning performed by the model. This can be useful for
+    debugging and understanding the model's reasoning process. One of `concise` or
+    `detailed`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py
new file mode 100644
index 00000000..6052c5ae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/reasoning_effort.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ReasoningEffort"]
+
+ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py
new file mode 100644
index 00000000..d4d1deaa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatJSONObject"]
+
+
+class ResponseFormatJSONObject(TypedDict, total=False):
+    type: Required[Literal["json_object"]]
+    """The type of response format being defined. Always `json_object`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py
new file mode 100644
index 00000000..5b0a13ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_json_schema.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatJSONSchema", "JSONSchema"]
+
+
+class JSONSchema(TypedDict, total=False):
+    name: Required[str]
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    description: str
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    schema: Dict[str, object]
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+
+    strict: Optional[bool]
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
+
+
+class ResponseFormatJSONSchema(TypedDict, total=False):
+    json_schema: Required[JSONSchema]
+    """Structured Outputs configuration options, including a JSON Schema."""
+
+    type: Required[Literal["json_schema"]]
+    """The type of response format being defined. Always `json_schema`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py
new file mode 100644
index 00000000..c3ef2b08
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/response_format_text.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatText"]
+
+
+class ResponseFormatText(TypedDict, total=False):
+    type: Required[Literal["text"]]
+    """The type of response format being defined. Always `text`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py b/.venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py
new file mode 100644
index 00000000..3bf0e137
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/shared_params/responses_model.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from ..shared.chat_model import ChatModel
+
+__all__ = ["ResponsesModel"]
+
+ResponsesModel: TypeAlias = Union[
+    str, ChatModel, Literal["o1-pro", "o1-pro-2025-03-19", "computer-use-preview", "computer-use-preview-2025-03-11"]
+]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py
new file mode 100644
index 00000000..2813bc66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["StaticFileChunkingStrategy"]
+
+
+class StaticFileChunkingStrategy(BaseModel):
+    chunk_overlap_tokens: int
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: int
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py
new file mode 100644
index 00000000..2a95dce5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .static_file_chunking_strategy import StaticFileChunkingStrategy
+
+__all__ = ["StaticFileChunkingStrategyObject"]
+
+
+class StaticFileChunkingStrategyObject(BaseModel):
+    static: StaticFileChunkingStrategy
+
+    type: Literal["static"]
+    """Always `static`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py
new file mode 100644
index 00000000..0cdf35c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_object_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam
+
+__all__ = ["StaticFileChunkingStrategyObjectParam"]
+
+
+class StaticFileChunkingStrategyObjectParam(TypedDict, total=False):
+    static: Required[StaticFileChunkingStrategyParam]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py
new file mode 100644
index 00000000..f917ac56
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/static_file_chunking_strategy_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["StaticFileChunkingStrategyParam"]
+
+
+class StaticFileChunkingStrategyParam(TypedDict, total=False):
+    chunk_overlap_tokens: Required[int]
+    """The number of tokens that overlap between chunks. The default value is `400`.
+
+    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+    """
+
+    max_chunk_size_tokens: Required[int]
+    """The maximum number of tokens in each chunk.
+
+    The default value is `800`. The minimum value is `100` and the maximum value is
+    `4096`.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/upload.py b/.venv/lib/python3.12/site-packages/openai/types/upload.py
new file mode 100644
index 00000000..914b69a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/upload.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .file_object import FileObject
+
+__all__ = ["Upload"]
+
+
+class Upload(BaseModel):
+    id: str
+    """The Upload unique identifier, which can be referenced in API endpoints."""
+
+    bytes: int
+    """The intended number of bytes to be uploaded."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the Upload was created."""
+
+    expires_at: int
+    """The Unix timestamp (in seconds) for when the Upload will expire."""
+
+    filename: str
+    """The name of the file to be uploaded."""
+
+    object: Literal["upload"]
+    """The object type, which is always "upload"."""
+
+    purpose: str
+    """The intended purpose of the file.
+
+    [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose)
+    for acceptable values.
+    """
+
+    status: Literal["pending", "completed", "cancelled", "expired"]
+    """The status of the Upload."""
+
+    file: Optional[FileObject] = None
+    """The `File` object represents a document that has been uploaded to OpenAI."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py b/.venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py
new file mode 100644
index 00000000..cce568d5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/upload_complete_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+__all__ = ["UploadCompleteParams"]
+
+
+class UploadCompleteParams(TypedDict, total=False):
+    part_ids: Required[List[str]]
+    """The ordered list of Part IDs."""
+
+    md5: str
+    """
+    The optional md5 checksum for the file contents, used to verify that the
+    uploaded bytes match what you expect.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/upload_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/upload_create_params.py
new file mode 100644
index 00000000..2ebabe6c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/upload_create_params.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .file_purpose import FilePurpose
+
+__all__ = ["UploadCreateParams"]
+
+
+class UploadCreateParams(TypedDict, total=False):
+    bytes: Required[int]
+    """The number of bytes in the file you are uploading."""
+
+    filename: Required[str]
+    """The name of the file to upload."""
+
+    mime_type: Required[str]
+    """The MIME type of the file.
+
+    This must fall within the supported MIME types for your file purpose. See the
+    supported MIME types for assistants and vision.
+    """
+
+    purpose: Required[FilePurpose]
+    """The intended purpose of the uploaded file.
+
+    See the
+    [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py
new file mode 100644
index 00000000..41deb0ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/uploads/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .upload_part import UploadPart as UploadPart
+from .part_create_params import PartCreateParams as PartCreateParams
diff --git a/.venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py
new file mode 100644
index 00000000..9851ca41
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/uploads/part_create_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ..._types import FileTypes
+
+__all__ = ["PartCreateParams"]
+
+
+class PartCreateParams(TypedDict, total=False):
+    data: Required[FileTypes]
+    """The chunk of bytes for this Part."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py b/.venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py
new file mode 100644
index 00000000..e09621d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/uploads/upload_part.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["UploadPart"]
+
+
+class UploadPart(BaseModel):
+    id: str
+    """The upload Part unique identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the Part was created."""
+
+    object: Literal["upload.part"]
+    """The object type, which is always `upload.part`."""
+
+    upload_id: str
+    """The ID of the Upload object that this Part was added to."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store.py
new file mode 100644
index 00000000..2473a442
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store.py
@@ -0,0 +1,82 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .shared.metadata import Metadata
+
+__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
+
+
+class FileCounts(BaseModel):
+    cancelled: int
+    """The number of files that were cancelled."""
+
+    completed: int
+    """The number of files that have been successfully processed."""
+
+    failed: int
+    """The number of files that have failed to process."""
+
+    in_progress: int
+    """The number of files that are currently being processed."""
+
+    total: int
+    """The total number of files."""
+
+
+class ExpiresAfter(BaseModel):
+    anchor: Literal["last_active_at"]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `last_active_at`.
+    """
+
+    days: int
+    """The number of days after the anchor time that the vector store will expire."""
+
+
+class VectorStore(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the vector store was created."""
+
+    file_counts: FileCounts
+
+    last_active_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the vector store was last active."""
+
+    metadata: Optional[Metadata] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    name: str
+    """The name of the vector store."""
+
+    object: Literal["vector_store"]
+    """The object type, which is always `vector_store`."""
+
+    status: Literal["expired", "in_progress", "completed"]
+    """
+    The status of the vector store, which can be either `expired`, `in_progress`, or
+    `completed`. A status of `completed` indicates that the vector store is ready
+    for use.
+    """
+
+    usage_bytes: int
+    """The total number of bytes used by the files in the vector store."""
+
+    expires_after: Optional[ExpiresAfter] = None
+    """The expiration policy for a vector store."""
+
+    expires_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the vector store will expire."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py
new file mode 100644
index 00000000..365d0936
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_create_params.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .shared_params.metadata import Metadata
+from .file_chunking_strategy_param import FileChunkingStrategyParam
+
+__all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
+
+
+class VectorStoreCreateParams(TypedDict, total=False):
+    chunking_strategy: FileChunkingStrategyParam
+    """The chunking strategy used to chunk the file(s).
+
+    If not set, will use the `auto` strategy. Only applicable if `file_ids` is
+    non-empty.
+    """
+
+    expires_after: ExpiresAfter
+    """The expiration policy for a vector store."""
+
+    file_ids: List[str]
+    """
+    A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+    the vector store should use. Useful for tools like `file_search` that can access
+    files.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    name: str
+    """The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+    anchor: Required[Literal["last_active_at"]]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `last_active_at`.
+    """
+
+    days: Required[int]
+    """The number of days after the anchor time that the vector store will expire."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py
new file mode 100644
index 00000000..dfac9ce8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VectorStoreDeleted"]
+
+
+class VectorStoreDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["vector_store.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py
new file mode 100644
index 00000000..e26ff90a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["VectorStoreListParams"]
+
+
+class VectorStoreListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py
new file mode 100644
index 00000000..17573d0f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_params.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .shared_params.compound_filter import CompoundFilter
+from .shared_params.comparison_filter import ComparisonFilter
+
+__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"]
+
+
+class VectorStoreSearchParams(TypedDict, total=False):
+    query: Required[Union[str, List[str]]]
+    """A query string for a search"""
+
+    filters: Filters
+    """A filter to apply based on file attributes."""
+
+    max_num_results: int
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: RankingOptions
+    """Ranking options for search."""
+
+    rewrite_query: bool
+    """Whether to rewrite the natural language query for vector search."""
+
+
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class RankingOptions(TypedDict, total=False):
+    ranker: Literal["auto", "default-2024-11-15"]
+
+    score_threshold: float
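A sketch of a search request assembled from these params; the store ID, the attribute filter, and the ranking values are illustrative, and `client.vector_stores.search` is assumed to be the matching method:

    from openai import OpenAI

    client = OpenAI()

    results = client.vector_stores.search(
        "vs_abc123",                                # placeholder vector store ID
        query="How do I rotate an API key?",
        filters={"type": "eq", "key": "category", "value": "security"},  # a ComparisonFilter
        max_num_results=10,                         # must be between 1 and 50 inclusive
        ranking_options={"ranker": "auto", "score_threshold": 0.5},
        rewrite_query=True,
    )
    for hit in results:                             # yields the search response objects
        print(hit.filename, hit.score)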
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py
new file mode 100644
index 00000000..d78b71bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_search_response.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VectorStoreSearchResponse", "Content"]
+
+
+class Content(BaseModel):
+    text: str
+    """The text content returned from search."""
+
+    type: Literal["text"]
+    """The type of content."""
+
+
+class VectorStoreSearchResponse(BaseModel):
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    content: List[Content]
+    """Content chunks from the file."""
+
+    file_id: str
+    """The ID of the vector store file."""
+
+    filename: str
+    """The name of the vector store file."""
+
+    score: float
+    """The similarity score for the result."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py
new file mode 100644
index 00000000..4f6ac639
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_store_update_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .shared_params.metadata import Metadata
+
+__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
+
+
+class VectorStoreUpdateParams(TypedDict, total=False):
+    expires_after: Optional[ExpiresAfter]
+    """The expiration policy for a vector store."""
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    name: Optional[str]
+    """The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+    anchor: Required[Literal["last_active_at"]]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `last_active_at`.
+    """
+
+    days: Required[int]
+    """The number of days after the anchor time that the vector store will expire."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py
new file mode 100644
index 00000000..96ce3014
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/__init__.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .file_list_params import FileListParams as FileListParams
+from .vector_store_file import VectorStoreFile as VectorStoreFile
+from .file_create_params import FileCreateParams as FileCreateParams
+from .file_update_params import FileUpdateParams as FileUpdateParams
+from .file_content_response import FileContentResponse as FileContentResponse
+from .vector_store_file_batch import VectorStoreFileBatch as VectorStoreFileBatch
+from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams
+from .vector_store_file_deleted import VectorStoreFileDeleted as VectorStoreFileDeleted
+from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py
new file mode 100644
index 00000000..1a470f75
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_create_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Required, TypedDict
+
+from ..file_chunking_strategy_param import FileChunkingStrategyParam
+
+__all__ = ["FileBatchCreateParams"]
+
+
+class FileBatchCreateParams(TypedDict, total=False):
+    file_ids: Required[List[str]]
+    """
+    A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+    the vector store should use. Useful for tools like `file_search` that can access
+    files.
+    """
+
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    chunking_strategy: FileChunkingStrategyParam
+    """The chunking strategy used to chunk the file(s).
+
+    If not set, will use the `auto` strategy. Only applicable if `file_ids` is
+    non-empty.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py
new file mode 100644
index 00000000..2a0a6c6a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_batch_list_files_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FileBatchListFilesParams"]
+
+
+class FileBatchListFilesParams(TypedDict, total=False):
+    vector_store_id: Required[str]
+
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    filter: Literal["in_progress", "completed", "failed", "cancelled"]
+    """Filter by file status.
+
+    One of `in_progress`, `completed`, `failed`, `cancelled`.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py
new file mode 100644
index 00000000..32db2f2c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_content_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FileContentResponse"]
+
+
+class FileContentResponse(BaseModel):
+    text: Optional[str] = None
+    """The text content."""
+
+    type: Optional[str] = None
+    """The content type (currently only `"text"`)."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py
new file mode 100644
index 00000000..5b898925
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_create_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Optional
+from typing_extensions import Required, TypedDict
+
+from ..file_chunking_strategy_param import FileChunkingStrategyParam
+
+__all__ = ["FileCreateParams"]
+
+
+class FileCreateParams(TypedDict, total=False):
+    file_id: Required[str]
+    """
+    A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+    vector store should use. Useful for tools like `file_search` that can access
+    files.
+    """
+
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    chunking_strategy: FileChunkingStrategyParam
+    """The chunking strategy used to chunk the file(s).
+
+    If not set, will use the `auto` strategy. Only applicable if `file_id` is
+    provided.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py
new file mode 100644
index 00000000..867b5fb3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_list_params.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["FileListParams"]
+
+
+class FileListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    filter: Literal["in_progress", "completed", "failed", "cancelled"]
+    """Filter by file status.
+
+    One of `in_progress`, `completed`, `failed`, `cancelled`.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py
new file mode 100644
index 00000000..ebf540d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/file_update_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["FileUpdateParams"]
+
+
+class FileUpdateParams(TypedDict, total=False):
+    vector_store_id: Required[str]
+
+    attributes: Required[Optional[Dict[str, Union[str, float, bool]]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py
new file mode 100644
index 00000000..b59a61df
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..file_chunking_strategy import FileChunkingStrategy
+
+__all__ = ["VectorStoreFile", "LastError"]
+
+
+class LastError(BaseModel):
+    code: Literal["server_error", "unsupported_file", "invalid_file"]
+    """One of `server_error` or `rate_limit_exceeded`."""
+
+    message: str
+    """A human-readable description of the error."""
+
+
+class VectorStoreFile(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the vector store file was created."""
+
+    last_error: Optional[LastError] = None
+    """The last error associated with this vector store file.
+
+    Will be `null` if there are no errors.
+    """
+
+    object: Literal["vector_store.file"]
+    """The object type, which is always `vector_store.file`."""
+
+    status: Literal["in_progress", "completed", "cancelled", "failed"]
+    """
+    The status of the vector store file, which can be either `in_progress`,
+    `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
+    vector store file is ready for use.
+    """
+
+    usage_bytes: int
+    """The total vector store usage in bytes.
+
+    Note that this may be different from the original file size.
+    """
+
+    vector_store_id: str
+    """
+    The ID of the
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    that the [File](https://platform.openai.com/docs/api-reference/files) is
+    attached to.
+    """
+
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    chunking_strategy: Optional[FileChunkingStrategy] = None
+    """The strategy used to chunk the file."""
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py
new file mode 100644
index 00000000..57dbfbd8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_batch.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["VectorStoreFileBatch", "FileCounts"]
+
+
+class FileCounts(BaseModel):
+    cancelled: int
+    """The number of files that where cancelled."""
+
+    completed: int
+    """The number of files that have been processed."""
+
+    failed: int
+    """The number of files that have failed to process."""
+
+    in_progress: int
+    """The number of files that are currently being processed."""
+
+    total: int
+    """The total number of files."""
+
+
+class VectorStoreFileBatch(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """
+    The Unix timestamp (in seconds) for when the vector store files batch was
+    created.
+    """
+
+    file_counts: FileCounts
+
+    object: Literal["vector_store.files_batch"]
+    """The object type, which is always `vector_store.file_batch`."""
+
+    status: Literal["in_progress", "completed", "cancelled", "failed"]
+    """
+    The status of the vector store files batch, which can be either `in_progress`,
+    `completed`, `cancelled` or `failed`.
+    """
+
+    vector_store_id: str
+    """
+    The ID of the
+    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+    that the [File](https://platform.openai.com/docs/api-reference/files) is
+    attached to.
+    """
diff --git a/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py
new file mode 100644
index 00000000..5c856f26
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/vector_stores/vector_store_file_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["VectorStoreFileDeleted"]
+
+
+class VectorStoreFileDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["vector_store.file.deleted"]
diff --git a/.venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py b/.venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py
new file mode 100644
index 00000000..40fd24ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/types/websocket_connection_options.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing_extensions import Sequence, TypedDict
+
+if TYPE_CHECKING:
+    from websockets import Subprotocol
+    from websockets.extensions import ClientExtensionFactory
+
+
+class WebsocketConnectionOptions(TypedDict, total=False):
+    """Websocket connection options copied from `websockets`.
+
+    For example: https://websockets.readthedocs.io/en/stable/reference/asyncio/client.html#websockets.asyncio.client.connect
+    """
+
+    extensions: Sequence[ClientExtensionFactory] | None
+    """List of supported extensions, in order in which they should be negotiated and run."""
+
+    subprotocols: Sequence[Subprotocol] | None
+    """List of supported subprotocols, in order of decreasing preference."""
+
+    compression: str | None
+    """The “permessage-deflate” extension is enabled by default. Set compression to None to disable it. See the [compression guide](https://websockets.readthedocs.io/en/stable/topics/compression.html) for details."""
+
+    # limits
+    max_size: int | None
+    """Maximum size of incoming messages in bytes. None disables the limit."""
+
+    max_queue: int | None | tuple[int | None, int | None]
+    """High-water mark of the buffer where frames are received. It defaults to 16 frames. The low-water mark defaults to max_queue // 4. You may pass a (high, low) tuple to set the high-water and low-water marks. If you want to disable flow control entirely, you may set it to None, although that’s a bad idea."""
+
+    write_limit: int | tuple[int, int | None]
+    """High-water mark of write buffer in bytes. It is passed to set_write_buffer_limits(). It defaults to 32 KiB. You may pass a (high, low) tuple to set the high-water and low-water marks."""
diff --git a/.venv/lib/python3.12/site-packages/openai/version.py b/.venv/lib/python3.12/site-packages/openai/version.py
new file mode 100644
index 00000000..01a08ab5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/openai/version.py
@@ -0,0 +1,3 @@
+from ._version import __version__
+
+VERSION: str = __version__