From e6983c11d78bf315504797e34680d9fa68189bfb Mon Sep 17 00:00:00 2001 From: "bobo.yang" Date: Wed, 15 Nov 2023 15:46:46 +0800 Subject: [PATCH] update devchat 11.15 --- .DS_Store | Bin 10244 -> 10244 bytes .../.DS_Store | Bin site-packages/.DS_Store | Bin 6148 -> 6148 bytes .../Pygments-2.16.1.dist-info/RECORD | 2 +- site-packages/_yaml/.DS_Store | Bin 6148 -> 0 bytes site-packages/anyio/.DS_Store | Bin 6148 -> 0 bytes site-packages/anyio/_backends/.DS_Store | Bin 6148 -> 0 bytes site-packages/anyio/_core/.DS_Store | Bin 6148 -> 0 bytes site-packages/anyio/abc/.DS_Store | Bin 6148 -> 0 bytes site-packages/anyio/streams/.DS_Store | Bin 6148 -> 0 bytes site-packages/certifi/.DS_Store | Bin 6148 -> 0 bytes .../charset_normalizer-3.3.2.dist-info/RECORD | 2 +- site-packages/charset_normalizer/.DS_Store | Bin 6148 -> 0 bytes .../charset_normalizer/cli/.DS_Store | Bin 6148 -> 0 bytes site-packages/click-8.1.7.dist-info/RECORD | 1 - .../INSTALLER | 0 .../colorama-0.4.6.dist-info/METADATA | 441 ++++++++++++ site-packages/colorama-0.4.6.dist-info/RECORD | 31 + .../WHEEL | 4 +- .../licenses/LICENSE.txt | 27 + site-packages/colorama/__init__.py | 7 + site-packages/colorama/ansi.py | 102 +++ site-packages/colorama/ansitowin32.py | 277 ++++++++ site-packages/colorama/initialise.py | 121 ++++ site-packages/colorama/tests/__init__.py | 1 + site-packages/colorama/tests/ansi_test.py | 76 ++ .../colorama/tests/ansitowin32_test.py | 294 ++++++++ .../colorama/tests/initialise_test.py | 189 +++++ site-packages/colorama/tests/isatty_test.py | 57 ++ site-packages/colorama/tests/utils.py | 49 ++ site-packages/colorama/tests/winterm_test.py | 131 ++++ site-packages/colorama/win32.py | 180 +++++ site-packages/colorama/winterm.py | 195 ++++++ .../INSTALLER | 0 .../LICENSE | 0 .../METADATA | 5 +- .../RECORD | 34 +- .../REQUESTED | 0 .../WHEEL | 0 .../direct_url.json | 0 .../entry_points.txt | 0 .../devchat-0.2.9.dist-info/REQUESTED | 0 site-packages/devchat/.DS_Store | Bin 6148 -> 0 bytes site-packages/devchat/_cli/run.py | 9 +- site-packages/devchat/_cli/utils.py | 18 +- site-packages/devchat/openai/openai_prompt.py | 4 +- site-packages/devchat/utils.py | 6 +- site-packages/distro-1.8.0.dist-info/RECORD | 2 +- site-packages/distro/.DS_Store | Bin 6148 -> 0 bytes site-packages/exceptiongroup/.DS_Store | Bin 6148 -> 0 bytes site-packages/git/.DS_Store | Bin 6148 -> 0 bytes site-packages/git/index/.DS_Store | Bin 6148 -> 0 bytes site-packages/git/objects/.DS_Store | Bin 6148 -> 0 bytes site-packages/git/objects/submodule/.DS_Store | Bin 6148 -> 0 bytes site-packages/git/refs/.DS_Store | Bin 6148 -> 0 bytes site-packages/git/repo/.DS_Store | Bin 6148 -> 0 bytes site-packages/gitdb/.DS_Store | Bin 6148 -> 0 bytes site-packages/gitdb/db/.DS_Store | Bin 6148 -> 0 bytes site-packages/gitdb/test/.DS_Store | Bin 6148 -> 0 bytes site-packages/gitdb/utils/.DS_Store | Bin 6148 -> 0 bytes site-packages/h11/.DS_Store | Bin 6148 -> 0 bytes site-packages/h11/tests/.DS_Store | Bin 6148 -> 0 bytes .../INSTALLER | 0 .../METADATA | 66 +- .../RECORD | 22 +- .../WHEEL | 0 .../licenses/LICENSE.md | 0 site-packages/httpcore/.DS_Store | Bin 6148 -> 0 bytes site-packages/httpcore/__init__.py | 2 +- site-packages/httpcore/_async/.DS_Store | Bin 6148 -> 0 bytes .../httpcore/_async/connection_pool.py | 20 +- site-packages/httpcore/_backends/.DS_Store | Bin 6148 -> 0 bytes site-packages/httpcore/_backends/auto.py | 5 +- site-packages/httpcore/_backends/sync.py | 6 - site-packages/httpcore/_sync/.DS_Store | Bin 6148 -> 0 bytes 
.../httpcore/_sync/connection_pool.py | 20 +- site-packages/httpcore/_synchronization.py | 108 ++- .../INSTALLER | 0 .../METADATA | 23 +- .../RECORD | 32 +- .../WHEEL | 0 .../entry_points.txt | 0 .../licenses/LICENSE.md | 0 site-packages/httpx/.DS_Store | Bin 6148 -> 0 bytes site-packages/httpx/__version__.py | 2 +- site-packages/httpx/_auth.py | 9 +- site-packages/httpx/_compat.py | 4 +- site-packages/httpx/_config.py | 17 +- site-packages/httpx/_models.py | 17 +- site-packages/httpx/_multipart.py | 3 +- site-packages/httpx/_transports/.DS_Store | Bin 6148 -> 0 bytes site-packages/httpx/_transports/default.py | 2 +- site-packages/httpx/_urlparse.py | 2 +- site-packages/httpx/_utils.py | 35 - site-packages/idna/.DS_Store | Bin 6148 -> 0 bytes site-packages/importlib_metadata/.DS_Store | Bin 6148 -> 0 bytes .../LICENSE | 202 ------ .../METADATA | 104 --- .../RECORD | 72 -- .../REQUESTED | 0 .../top_level.txt | 1 - site-packages/importlib_resources/__init__.py | 17 - .../importlib_resources/_adapters.py | 168 ----- site-packages/importlib_resources/_common.py | 207 ------ site-packages/importlib_resources/_compat.py | 126 ---- .../importlib_resources/_itertools.py | 38 - site-packages/importlib_resources/abc.py | 170 ----- site-packages/importlib_resources/py.typed | 0 site-packages/importlib_resources/readers.py | 172 ----- site-packages/importlib_resources/simple.py | 106 --- .../importlib_resources/tests/__init__.py | 0 .../importlib_resources/tests/_compat.py | 32 - .../importlib_resources/tests/_path.py | 56 -- .../tests/data01/__init__.py | 0 .../tests/data01/binary.file | Bin 4 -> 0 bytes .../tests/data01/subdirectory/__init__.py | 0 .../tests/data01/subdirectory/binary.file | Bin 4 -> 0 bytes .../tests/data01/utf-16.file | Bin 44 -> 0 bytes .../tests/data01/utf-8.file | 1 - .../tests/data02/__init__.py | 0 .../tests/data02/one/__init__.py | 0 .../tests/data02/one/resource1.txt | 1 - .../subdirectory/subsubdir/resource.txt | 1 - .../tests/data02/two/__init__.py | 0 .../tests/data02/two/resource2.txt | 1 - .../tests/namespacedata01/binary.file | Bin 4 -> 0 bytes .../tests/namespacedata01/utf-16.file | Bin 44 -> 0 bytes .../tests/namespacedata01/utf-8.file | 1 - .../tests/test_compatibilty_files.py | 104 --- .../tests/test_contents.py | 43 -- .../importlib_resources/tests/test_custom.py | 45 -- .../importlib_resources/tests/test_files.py | 112 --- .../importlib_resources/tests/test_open.py | 85 --- .../importlib_resources/tests/test_path.py | 69 -- .../importlib_resources/tests/test_read.py | 80 --- .../importlib_resources/tests/test_reader.py | 143 ---- .../tests/test_resource.py | 221 ------ .../importlib_resources/tests/util.py | 165 ----- .../importlib_resources/tests/zip.py | 32 - site-packages/markdown_it/.DS_Store | Bin 6148 -> 0 bytes site-packages/markdown_it/cli/.DS_Store | Bin 6148 -> 0 bytes site-packages/markdown_it/common/.DS_Store | Bin 6148 -> 0 bytes site-packages/markdown_it/helpers/.DS_Store | Bin 6148 -> 0 bytes site-packages/markdown_it/presets/.DS_Store | Bin 6148 -> 0 bytes .../markdown_it/rules_block/.DS_Store | Bin 6148 -> 0 bytes .../markdown_it/rules_core/.DS_Store | Bin 6148 -> 0 bytes .../markdown_it/rules_inline/.DS_Store | Bin 6148 -> 0 bytes .../markdown_it_py-3.0.0.dist-info/RECORD | 2 +- site-packages/mdurl/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/algorithms/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/approximation/.DS_Store | Bin 6148 -> 0 bytes 
.../algorithms/approximation/tests/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/assortativity/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/assortativity/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/bipartite/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/bipartite/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/centrality/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/centrality/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/coloring/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/coloring/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/community/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/community/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/components/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/components/tests/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/connectivity/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/connectivity/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/flow/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/flow/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/isomorphism/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/isomorphism/tests/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/link_analysis/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/link_analysis/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/minors/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/minors/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/operators/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/operators/tests/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/shortest_paths/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/shortest_paths/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/traversal/.DS_Store | Bin 6148 -> 0 bytes .../algorithms/traversal/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/tree/.DS_Store | Bin 6148 -> 0 bytes .../networkx/algorithms/tree/tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/classes/.DS_Store | Bin 6148 -> 0 bytes .../networkx/classes/tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/drawing/.DS_Store | Bin 6148 -> 0 bytes .../networkx/drawing/tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/generators/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/generators/atlas.py | 4 +- .../networkx/generators/tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/linalg/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/linalg/tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/readwrite/.DS_Store | Bin 6148 -> 0 bytes .../networkx/readwrite/json_graph/.DS_Store | Bin 6148 -> 0 bytes .../readwrite/json_graph/tests/.DS_Store | Bin 6148 -> 0 bytes .../networkx/readwrite/tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/utils/.DS_Store | Bin 6148 -> 0 bytes site-packages/networkx/utils/tests/.DS_Store | Bin 6148 -> 0 bytes .../openai-1.0.0rc1.dist-info/RECORD | 236 ------- .../INSTALLER | 0 .../METADATA | 60 +- site-packages/openai-1.2.4.dist-info/RECORD | 392 +++++++++++ .../WHEEL | 0 .../entry_points.txt | 0 .../licenses/LICENSE | 0 site-packages/openai/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/__init__.py | 2 + site-packages/openai/_base_client.py | 112 ++- site-packages/openai/_client.py | 46 +- site-packages/openai/_extras/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/_files.py | 2 +- 
site-packages/openai/_models.py | 16 +- site-packages/openai/_module_client.py | 7 + site-packages/openai/_response.py | 5 +- site-packages/openai/_streaming.py | 14 +- site-packages/openai/_types.py | 167 ++++- site-packages/openai/_utils/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/_utils/_transform.py | 13 +- site-packages/openai/_version.py | 2 +- site-packages/openai/cli/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/cli/_api/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/cli/_api/audio.py | 4 +- site-packages/openai/cli/_api/chat/.DS_Store | Bin 6148 -> 0 bytes .../openai/cli/_api/chat/completions.py | 6 +- site-packages/openai/cli/_api/files.py | 9 +- site-packages/openai/cli/_tools/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/lib/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/lib/_old_api.py | 66 ++ site-packages/openai/lib/_validators.py | 2 +- site-packages/openai/lib/azure.py | 1 + site-packages/openai/pagination.py | 6 +- site-packages/openai/resources/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/resources/__init__.py | 5 + .../openai/resources/audio/.DS_Store | Bin 6148 -> 0 bytes .../openai/resources/audio/__init__.py | 10 + site-packages/openai/resources/audio/audio.py | 12 + .../openai/resources/audio/speech.py | 168 +++++ .../openai/resources/audio/transcriptions.py | 14 +- .../openai/resources/audio/translations.py | 14 +- .../openai/resources/beta/__init__.py | 30 + .../resources/beta/assistants/__init__.py | 20 + .../resources/beta/assistants/assistants.py | 656 ++++++++++++++++++ .../openai/resources/beta/assistants/files.py | 416 +++++++++++ site-packages/openai/resources/beta/beta.py | 60 ++ .../openai/resources/beta/threads/__init__.py | 30 + .../beta/threads/messages/__init__.py | 20 + .../resources/beta/threads/messages/files.py | 259 +++++++ .../beta/threads/messages/messages.py | 479 +++++++++++++ .../resources/beta/threads/runs/__init__.py | 15 + .../resources/beta/threads/runs/runs.py | 656 ++++++++++++++++++ .../resources/beta/threads/runs/steps.py | 257 +++++++ .../openai/resources/beta/threads/threads.py | 543 +++++++++++++++ site-packages/openai/resources/chat/.DS_Store | Bin 6148 -> 0 bytes .../openai/resources/chat/completions.py | 394 +++++++++-- site-packages/openai/resources/completions.py | 82 ++- site-packages/openai/resources/edits.py | 6 +- site-packages/openai/resources/embeddings.py | 6 +- site-packages/openai/resources/files.py | 210 ++++-- site-packages/openai/resources/fine_tunes.py | 34 +- .../openai/resources/fine_tuning/.DS_Store | Bin 6148 -> 0 bytes .../openai/resources/fine_tuning/jobs.py | 22 +- site-packages/openai/resources/images.py | 96 ++- site-packages/openai/resources/models.py | 14 +- site-packages/openai/resources/moderations.py | 6 +- site-packages/openai/types/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/types/__init__.py | 3 + site-packages/openai/types/audio/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/types/audio/__init__.py | 1 + .../types/audio/speech_create_params.py | 34 + .../audio/transcription_create_params.py | 4 +- .../types/audio/translation_create_params.py | 4 +- site-packages/openai/types/beta/__init__.py | 16 + site-packages/openai/types/beta/assistant.py | 85 +++ .../types/beta/assistant_create_params.py | 82 +++ .../openai/types/beta/assistant_deleted.py | 15 + .../types/beta/assistant_list_params.py | 39 ++ .../types/beta/assistant_update_params.py | 84 +++ .../openai/types/beta/assistants/__init__.py | 8 + 
.../types/beta/assistants/assistant_file.py | 21 + .../beta/assistants/file_create_params.py | 16 + .../beta/assistants/file_delete_response.py | 15 + .../types/beta/assistants/file_list_params.py | 39 ++ .../openai/types/beta/chat/__init__.py | 3 + site-packages/openai/types/beta/thread.py | 28 + .../beta/thread_create_and_run_params.py | 121 ++++ .../openai/types/beta/thread_create_params.py | 51 ++ .../openai/types/beta/thread_deleted.py | 15 + .../openai/types/beta/thread_update_params.py | 18 + .../openai/types/beta/threads/__init__.py | 22 + .../threads/message_content_image_file.py | 22 + .../beta/threads/message_content_text.py | 74 ++ .../beta/threads/message_create_params.py | 35 + .../types/beta/threads/message_list_params.py | 39 ++ .../beta/threads/message_update_params.py | 20 + .../types/beta/threads/messages/__init__.py | 6 + .../beta/threads/messages/file_list_params.py | 41 ++ .../beta/threads/messages/message_file.py | 25 + .../required_action_function_tool_call.py | 34 + .../openai/types/beta/threads/run.py | 154 ++++ .../types/beta/threads/run_create_params.py | 73 ++ .../types/beta/threads/run_list_params.py | 39 ++ .../threads/run_submit_tool_outputs_params.py | 26 + .../types/beta/threads/run_update_params.py | 20 + .../types/beta/threads/runs/__init__.py | 13 + .../types/beta/threads/runs/code_tool_call.py | 67 ++ .../beta/threads/runs/function_tool_call.py | 38 + .../runs/message_creation_step_details.py | 19 + .../beta/threads/runs/retrieval_tool_call.py | 21 + .../types/beta/threads/runs/run_step.py | 93 +++ .../beta/threads/runs/step_list_params.py | 41 ++ .../threads/runs/tool_calls_step_details.py | 25 + .../types/beta/threads/thread_message.py | 65 ++ site-packages/openai/types/chat/.DS_Store | Bin 6148 -> 0 bytes site-packages/openai/types/chat/__init__.py | 42 ++ .../openai/types/chat/chat_completion.py | 14 +- ...chat_completion_assistant_message_param.py | 41 ++ .../types/chat/chat_completion_chunk.py | 56 +- ...hat_completion_content_part_image_param.py | 22 + .../chat_completion_content_part_param.py | 14 + ...chat_completion_content_part_text_param.py | 15 + ...t_completion_function_call_option_param.py | 12 + .../chat_completion_function_message_param.py | 19 + .../types/chat/chat_completion_message.py | 13 +- .../chat/chat_completion_message_param.py | 57 +- .../chat/chat_completion_message_tool_call.py | 31 + ...chat_completion_message_tool_call_param.py | 31 + ...chat_completion_named_tool_choice_param.py | 19 + .../openai/types/chat/chat_completion_role.py | 2 +- .../chat_completion_system_message_param.py | 16 + ...hat_completion_tool_choice_option_param.py | 12 + .../chat_completion_tool_message_param.py | 19 + .../types/chat/chat_completion_tool_param.py | 16 + .../chat_completion_user_message_param.py | 18 + .../types/chat/completion_create_params.py | 92 ++- site-packages/openai/types/completion.py | 10 +- .../openai/types/completion_choice.py | 2 +- .../openai/types/completion_create_params.py | 12 +- .../openai/types/create_embedding_response.py | 5 +- site-packages/openai/types/edit.py | 2 +- site-packages/openai/types/embedding.py | 3 +- .../openai/types/file_create_params.py | 17 +- site-packages/openai/types/file_deleted.py | 4 +- .../openai/types/file_list_params.py | 12 + site-packages/openai/types/file_object.py | 28 +- site-packages/openai/types/fine_tune.py | 3 +- site-packages/openai/types/fine_tune_event.py | 4 +- .../types/fine_tune_events_list_response.py | 3 +- .../openai/types/fine_tuning/.DS_Store | Bin 6148 -> 0 bytes 
.../types/fine_tuning/fine_tuning_job.py | 4 +- .../fine_tuning/fine_tuning_job_event.py | 2 +- .../types/fine_tuning/job_create_params.py | 13 + site-packages/openai/types/image.py | 6 + .../types/image_create_variation_params.py | 13 +- .../openai/types/image_edit_params.py | 8 +- .../openai/types/image_generate_params.py | 34 +- site-packages/openai/types/model.py | 4 +- site-packages/openai/types/shared/__init__.py | 4 + .../types/shared/function_definition.py | 35 + .../types/shared/function_parameters.py | 7 + .../openai/types/shared_params/__init__.py | 4 + .../shared_params/function_definition.py | 36 + .../shared_params/function_parameters.py | 9 + site-packages/pkg_resources/.DS_Store | Bin 6148 -> 0 bytes site-packages/pkg_resources/_vendor/.DS_Store | Bin 6148 -> 0 bytes .../_vendor/importlib_resources/.DS_Store | Bin 6148 -> 0 bytes .../pkg_resources/_vendor/jaraco/.DS_Store | Bin 6148 -> 0 bytes .../_vendor/jaraco/text/.DS_Store | Bin 6148 -> 0 bytes .../_vendor/more_itertools/.DS_Store | Bin 6148 -> 0 bytes .../pkg_resources/_vendor/packaging/.DS_Store | Bin 6148 -> 0 bytes .../_vendor/platformdirs/.DS_Store | Bin 6148 -> 0 bytes site-packages/pkg_resources/extern/.DS_Store | Bin 6148 -> 0 bytes site-packages/pydantic/.DS_Store | Bin 6148 -> 0 bytes site-packages/pygments/.DS_Store | Bin 6148 -> 0 bytes site-packages/pygments/filters/.DS_Store | Bin 6148 -> 0 bytes site-packages/pygments/formatters/.DS_Store | Bin 6148 -> 0 bytes site-packages/pygments/lexers/.DS_Store | Bin 6148 -> 0 bytes site-packages/pygments/styles/.DS_Store | Bin 6148 -> 0 bytes site-packages/requests/.DS_Store | Bin 6148 -> 0 bytes site-packages/rich/.DS_Store | Bin 6148 -> 0 bytes .../rich_click-1.7.1.dist-info/RECORD | 2 +- site-packages/rich_click/.DS_Store | Bin 6148 -> 0 bytes site-packages/smmap/.DS_Store | Bin 6148 -> 0 bytes site-packages/smmap/test/.DS_Store | Bin 6148 -> 0 bytes site-packages/sniffio/.DS_Store | Bin 6148 -> 0 bytes site-packages/sniffio/_tests/.DS_Store | Bin 6148 -> 0 bytes site-packages/tiktoken-0.4.0.dist-info/RECORD | 2 +- .../tiktoken-0.4.0.dist-info/direct_url.json | 2 +- site-packages/tiktoken/.DS_Store | Bin 6148 -> 0 bytes site-packages/tiktoken_ext/.DS_Store | Bin 6148 -> 0 bytes site-packages/tinydb/.DS_Store | Bin 6148 -> 0 bytes site-packages/tqdm-4.66.1.dist-info/RECORD | 2 +- site-packages/tqdm/.DS_Store | Bin 6148 -> 0 bytes site-packages/tqdm/contrib/.DS_Store | Bin 6148 -> 0 bytes site-packages/urllib3/.DS_Store | Bin 6148 -> 0 bytes site-packages/urllib3/contrib/.DS_Store | Bin 6148 -> 0 bytes .../contrib/_securetransport/.DS_Store | Bin 6148 -> 0 bytes site-packages/urllib3/packages/.DS_Store | Bin 6148 -> 0 bytes .../urllib3/packages/backports/.DS_Store | Bin 6148 -> 0 bytes site-packages/urllib3/util/.DS_Store | Bin 6148 -> 0 bytes site-packages/yaml/.DS_Store | Bin 6148 -> 0 bytes site-packages/zipp/.DS_Store | Bin 6148 -> 0 bytes 403 files changed, 10084 insertions(+), 3426 deletions(-) rename {site-packages/_distutils_hack => python-3.11.6-embed-amd64}/.DS_Store (100%) delete mode 100644 site-packages/_yaml/.DS_Store delete mode 100644 site-packages/anyio/.DS_Store delete mode 100644 site-packages/anyio/_backends/.DS_Store delete mode 100644 site-packages/anyio/_core/.DS_Store delete mode 100644 site-packages/anyio/abc/.DS_Store delete mode 100644 site-packages/anyio/streams/.DS_Store delete mode 100644 site-packages/certifi/.DS_Store delete mode 100644 site-packages/charset_normalizer/.DS_Store delete mode 100644 
site-packages/charset_normalizer/cli/.DS_Store rename site-packages/{devchat-0.2.9.dist-info => colorama-0.4.6.dist-info}/INSTALLER (100%) create mode 100644 site-packages/colorama-0.4.6.dist-info/METADATA create mode 100644 site-packages/colorama-0.4.6.dist-info/RECORD rename site-packages/{importlib_resources-6.1.0.dist-info => colorama-0.4.6.dist-info}/WHEEL (56%) create mode 100644 site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt create mode 100644 site-packages/colorama/__init__.py create mode 100644 site-packages/colorama/ansi.py create mode 100644 site-packages/colorama/ansitowin32.py create mode 100644 site-packages/colorama/initialise.py create mode 100644 site-packages/colorama/tests/__init__.py create mode 100644 site-packages/colorama/tests/ansi_test.py create mode 100644 site-packages/colorama/tests/ansitowin32_test.py create mode 100644 site-packages/colorama/tests/initialise_test.py create mode 100644 site-packages/colorama/tests/isatty_test.py create mode 100644 site-packages/colorama/tests/utils.py create mode 100644 site-packages/colorama/tests/winterm_test.py create mode 100644 site-packages/colorama/win32.py create mode 100644 site-packages/colorama/winterm.py rename site-packages/{httpcore-0.18.0.dist-info => devchat-0.2.10.dist-info}/INSTALLER (100%) rename site-packages/{devchat-0.2.9.dist-info => devchat-0.2.10.dist-info}/LICENSE (100%) rename site-packages/{devchat-0.2.9.dist-info => devchat-0.2.10.dist-info}/METADATA (99%) rename site-packages/{devchat-0.2.9.dist-info => devchat-0.2.10.dist-info}/RECORD (65%) rename site-packages/{click-8.1.7.dist-info => devchat-0.2.10.dist-info}/REQUESTED (100%) rename site-packages/{devchat-0.2.9.dist-info => devchat-0.2.10.dist-info}/WHEEL (100%) rename site-packages/{devchat-0.2.9.dist-info => devchat-0.2.10.dist-info}/direct_url.json (100%) rename site-packages/{devchat-0.2.9.dist-info => devchat-0.2.10.dist-info}/entry_points.txt (100%) delete mode 100644 site-packages/devchat-0.2.9.dist-info/REQUESTED delete mode 100644 site-packages/devchat/.DS_Store delete mode 100644 site-packages/distro/.DS_Store delete mode 100644 site-packages/exceptiongroup/.DS_Store delete mode 100644 site-packages/git/.DS_Store delete mode 100644 site-packages/git/index/.DS_Store delete mode 100644 site-packages/git/objects/.DS_Store delete mode 100644 site-packages/git/objects/submodule/.DS_Store delete mode 100644 site-packages/git/refs/.DS_Store delete mode 100644 site-packages/git/repo/.DS_Store delete mode 100644 site-packages/gitdb/.DS_Store delete mode 100644 site-packages/gitdb/db/.DS_Store delete mode 100644 site-packages/gitdb/test/.DS_Store delete mode 100644 site-packages/gitdb/utils/.DS_Store delete mode 100644 site-packages/h11/.DS_Store delete mode 100644 site-packages/h11/tests/.DS_Store rename site-packages/{httpx-0.25.0.dist-info => httpcore-1.0.2.dist-info}/INSTALLER (100%) rename site-packages/{httpcore-0.18.0.dist-info => httpcore-1.0.2.dist-info}/METADATA (91%) rename site-packages/{httpcore-0.18.0.dist-info => httpcore-1.0.2.dist-info}/RECORD (79%) rename site-packages/{httpcore-0.18.0.dist-info => httpcore-1.0.2.dist-info}/WHEEL (100%) rename site-packages/{httpcore-0.18.0.dist-info => httpcore-1.0.2.dist-info}/licenses/LICENSE.md (100%) delete mode 100644 site-packages/httpcore/.DS_Store delete mode 100644 site-packages/httpcore/_async/.DS_Store delete mode 100644 site-packages/httpcore/_backends/.DS_Store delete mode 100644 site-packages/httpcore/_sync/.DS_Store rename 
site-packages/{importlib_resources-6.1.0.dist-info => httpx-0.25.1.dist-info}/INSTALLER (100%) rename site-packages/{httpx-0.25.0.dist-info => httpx-0.25.1.dist-info}/METADATA (88%) rename site-packages/{httpx-0.25.0.dist-info => httpx-0.25.1.dist-info}/RECORD (67%) rename site-packages/{httpx-0.25.0.dist-info => httpx-0.25.1.dist-info}/WHEEL (100%) rename site-packages/{httpx-0.25.0.dist-info => httpx-0.25.1.dist-info}/entry_points.txt (100%) rename site-packages/{httpx-0.25.0.dist-info => httpx-0.25.1.dist-info}/licenses/LICENSE.md (100%) delete mode 100644 site-packages/httpx/.DS_Store delete mode 100644 site-packages/httpx/_transports/.DS_Store delete mode 100644 site-packages/idna/.DS_Store delete mode 100644 site-packages/importlib_metadata/.DS_Store delete mode 100644 site-packages/importlib_resources-6.1.0.dist-info/LICENSE delete mode 100644 site-packages/importlib_resources-6.1.0.dist-info/METADATA delete mode 100644 site-packages/importlib_resources-6.1.0.dist-info/RECORD delete mode 100644 site-packages/importlib_resources-6.1.0.dist-info/REQUESTED delete mode 100644 site-packages/importlib_resources-6.1.0.dist-info/top_level.txt delete mode 100644 site-packages/importlib_resources/__init__.py delete mode 100644 site-packages/importlib_resources/_adapters.py delete mode 100644 site-packages/importlib_resources/_common.py delete mode 100644 site-packages/importlib_resources/_compat.py delete mode 100644 site-packages/importlib_resources/_itertools.py delete mode 100644 site-packages/importlib_resources/abc.py delete mode 100644 site-packages/importlib_resources/py.typed delete mode 100644 site-packages/importlib_resources/readers.py delete mode 100644 site-packages/importlib_resources/simple.py delete mode 100644 site-packages/importlib_resources/tests/__init__.py delete mode 100644 site-packages/importlib_resources/tests/_compat.py delete mode 100644 site-packages/importlib_resources/tests/_path.py delete mode 100644 site-packages/importlib_resources/tests/data01/__init__.py delete mode 100644 site-packages/importlib_resources/tests/data01/binary.file delete mode 100644 site-packages/importlib_resources/tests/data01/subdirectory/__init__.py delete mode 100644 site-packages/importlib_resources/tests/data01/subdirectory/binary.file delete mode 100644 site-packages/importlib_resources/tests/data01/utf-16.file delete mode 100644 site-packages/importlib_resources/tests/data01/utf-8.file delete mode 100644 site-packages/importlib_resources/tests/data02/__init__.py delete mode 100644 site-packages/importlib_resources/tests/data02/one/__init__.py delete mode 100644 site-packages/importlib_resources/tests/data02/one/resource1.txt delete mode 100644 site-packages/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt delete mode 100644 site-packages/importlib_resources/tests/data02/two/__init__.py delete mode 100644 site-packages/importlib_resources/tests/data02/two/resource2.txt delete mode 100644 site-packages/importlib_resources/tests/namespacedata01/binary.file delete mode 100644 site-packages/importlib_resources/tests/namespacedata01/utf-16.file delete mode 100644 site-packages/importlib_resources/tests/namespacedata01/utf-8.file delete mode 100644 site-packages/importlib_resources/tests/test_compatibilty_files.py delete mode 100644 site-packages/importlib_resources/tests/test_contents.py delete mode 100644 site-packages/importlib_resources/tests/test_custom.py delete mode 100644 site-packages/importlib_resources/tests/test_files.py delete mode 100644 
site-packages/importlib_resources/tests/test_open.py delete mode 100644 site-packages/importlib_resources/tests/test_path.py delete mode 100644 site-packages/importlib_resources/tests/test_read.py delete mode 100644 site-packages/importlib_resources/tests/test_reader.py delete mode 100644 site-packages/importlib_resources/tests/test_resource.py delete mode 100644 site-packages/importlib_resources/tests/util.py delete mode 100644 site-packages/importlib_resources/tests/zip.py delete mode 100644 site-packages/markdown_it/.DS_Store delete mode 100644 site-packages/markdown_it/cli/.DS_Store delete mode 100644 site-packages/markdown_it/common/.DS_Store delete mode 100644 site-packages/markdown_it/helpers/.DS_Store delete mode 100644 site-packages/markdown_it/presets/.DS_Store delete mode 100644 site-packages/markdown_it/rules_block/.DS_Store delete mode 100644 site-packages/markdown_it/rules_core/.DS_Store delete mode 100644 site-packages/markdown_it/rules_inline/.DS_Store delete mode 100644 site-packages/mdurl/.DS_Store delete mode 100644 site-packages/networkx/.DS_Store delete mode 100644 site-packages/networkx/algorithms/.DS_Store delete mode 100644 site-packages/networkx/algorithms/approximation/.DS_Store delete mode 100644 site-packages/networkx/algorithms/approximation/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/assortativity/.DS_Store delete mode 100644 site-packages/networkx/algorithms/assortativity/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/bipartite/.DS_Store delete mode 100644 site-packages/networkx/algorithms/bipartite/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/centrality/.DS_Store delete mode 100644 site-packages/networkx/algorithms/centrality/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/coloring/.DS_Store delete mode 100644 site-packages/networkx/algorithms/coloring/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/community/.DS_Store delete mode 100644 site-packages/networkx/algorithms/community/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/components/.DS_Store delete mode 100644 site-packages/networkx/algorithms/components/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/connectivity/.DS_Store delete mode 100644 site-packages/networkx/algorithms/connectivity/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/flow/.DS_Store delete mode 100644 site-packages/networkx/algorithms/flow/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/isomorphism/.DS_Store delete mode 100644 site-packages/networkx/algorithms/isomorphism/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/link_analysis/.DS_Store delete mode 100644 site-packages/networkx/algorithms/link_analysis/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/minors/.DS_Store delete mode 100644 site-packages/networkx/algorithms/minors/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/operators/.DS_Store delete mode 100644 site-packages/networkx/algorithms/operators/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/shortest_paths/.DS_Store delete mode 100644 site-packages/networkx/algorithms/shortest_paths/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/tests/.DS_Store delete mode 100644 site-packages/networkx/algorithms/traversal/.DS_Store delete mode 100644 site-packages/networkx/algorithms/traversal/tests/.DS_Store delete mode 100644 
site-packages/networkx/algorithms/tree/.DS_Store delete mode 100644 site-packages/networkx/algorithms/tree/tests/.DS_Store delete mode 100644 site-packages/networkx/classes/.DS_Store delete mode 100644 site-packages/networkx/classes/tests/.DS_Store delete mode 100644 site-packages/networkx/drawing/.DS_Store delete mode 100644 site-packages/networkx/drawing/tests/.DS_Store delete mode 100644 site-packages/networkx/generators/.DS_Store delete mode 100644 site-packages/networkx/generators/tests/.DS_Store delete mode 100644 site-packages/networkx/linalg/.DS_Store delete mode 100644 site-packages/networkx/linalg/tests/.DS_Store delete mode 100644 site-packages/networkx/readwrite/.DS_Store delete mode 100644 site-packages/networkx/readwrite/json_graph/.DS_Store delete mode 100644 site-packages/networkx/readwrite/json_graph/tests/.DS_Store delete mode 100644 site-packages/networkx/readwrite/tests/.DS_Store delete mode 100644 site-packages/networkx/tests/.DS_Store delete mode 100644 site-packages/networkx/utils/.DS_Store delete mode 100644 site-packages/networkx/utils/tests/.DS_Store delete mode 100644 site-packages/openai-1.0.0rc1.dist-info/RECORD rename site-packages/{openai-1.0.0rc1.dist-info => openai-1.2.4.dist-info}/INSTALLER (100%) rename site-packages/{openai-1.0.0rc1.dist-info => openai-1.2.4.dist-info}/METADATA (92%) create mode 100644 site-packages/openai-1.2.4.dist-info/RECORD rename site-packages/{openai-1.0.0rc1.dist-info => openai-1.2.4.dist-info}/WHEEL (100%) rename site-packages/{openai-1.0.0rc1.dist-info => openai-1.2.4.dist-info}/entry_points.txt (100%) rename site-packages/{openai-1.0.0rc1.dist-info => openai-1.2.4.dist-info}/licenses/LICENSE (100%) delete mode 100644 site-packages/openai/.DS_Store delete mode 100644 site-packages/openai/_extras/.DS_Store delete mode 100644 site-packages/openai/_utils/.DS_Store delete mode 100644 site-packages/openai/cli/.DS_Store delete mode 100644 site-packages/openai/cli/_api/.DS_Store delete mode 100644 site-packages/openai/cli/_api/chat/.DS_Store delete mode 100644 site-packages/openai/cli/_tools/.DS_Store delete mode 100644 site-packages/openai/lib/.DS_Store create mode 100644 site-packages/openai/lib/_old_api.py delete mode 100644 site-packages/openai/resources/.DS_Store delete mode 100644 site-packages/openai/resources/audio/.DS_Store create mode 100644 site-packages/openai/resources/audio/speech.py create mode 100644 site-packages/openai/resources/beta/__init__.py create mode 100644 site-packages/openai/resources/beta/assistants/__init__.py create mode 100644 site-packages/openai/resources/beta/assistants/assistants.py create mode 100644 site-packages/openai/resources/beta/assistants/files.py create mode 100644 site-packages/openai/resources/beta/beta.py create mode 100644 site-packages/openai/resources/beta/threads/__init__.py create mode 100644 site-packages/openai/resources/beta/threads/messages/__init__.py create mode 100644 site-packages/openai/resources/beta/threads/messages/files.py create mode 100644 site-packages/openai/resources/beta/threads/messages/messages.py create mode 100644 site-packages/openai/resources/beta/threads/runs/__init__.py create mode 100644 site-packages/openai/resources/beta/threads/runs/runs.py create mode 100644 site-packages/openai/resources/beta/threads/runs/steps.py create mode 100644 site-packages/openai/resources/beta/threads/threads.py delete mode 100644 site-packages/openai/resources/chat/.DS_Store delete mode 100644 site-packages/openai/resources/fine_tuning/.DS_Store delete mode 100644 
site-packages/openai/types/.DS_Store delete mode 100644 site-packages/openai/types/audio/.DS_Store create mode 100644 site-packages/openai/types/audio/speech_create_params.py create mode 100644 site-packages/openai/types/beta/__init__.py create mode 100644 site-packages/openai/types/beta/assistant.py create mode 100644 site-packages/openai/types/beta/assistant_create_params.py create mode 100644 site-packages/openai/types/beta/assistant_deleted.py create mode 100644 site-packages/openai/types/beta/assistant_list_params.py create mode 100644 site-packages/openai/types/beta/assistant_update_params.py create mode 100644 site-packages/openai/types/beta/assistants/__init__.py create mode 100644 site-packages/openai/types/beta/assistants/assistant_file.py create mode 100644 site-packages/openai/types/beta/assistants/file_create_params.py create mode 100644 site-packages/openai/types/beta/assistants/file_delete_response.py create mode 100644 site-packages/openai/types/beta/assistants/file_list_params.py create mode 100644 site-packages/openai/types/beta/chat/__init__.py create mode 100644 site-packages/openai/types/beta/thread.py create mode 100644 site-packages/openai/types/beta/thread_create_and_run_params.py create mode 100644 site-packages/openai/types/beta/thread_create_params.py create mode 100644 site-packages/openai/types/beta/thread_deleted.py create mode 100644 site-packages/openai/types/beta/thread_update_params.py create mode 100644 site-packages/openai/types/beta/threads/__init__.py create mode 100644 site-packages/openai/types/beta/threads/message_content_image_file.py create mode 100644 site-packages/openai/types/beta/threads/message_content_text.py create mode 100644 site-packages/openai/types/beta/threads/message_create_params.py create mode 100644 site-packages/openai/types/beta/threads/message_list_params.py create mode 100644 site-packages/openai/types/beta/threads/message_update_params.py create mode 100644 site-packages/openai/types/beta/threads/messages/__init__.py create mode 100644 site-packages/openai/types/beta/threads/messages/file_list_params.py create mode 100644 site-packages/openai/types/beta/threads/messages/message_file.py create mode 100644 site-packages/openai/types/beta/threads/required_action_function_tool_call.py create mode 100644 site-packages/openai/types/beta/threads/run.py create mode 100644 site-packages/openai/types/beta/threads/run_create_params.py create mode 100644 site-packages/openai/types/beta/threads/run_list_params.py create mode 100644 site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py create mode 100644 site-packages/openai/types/beta/threads/run_update_params.py create mode 100644 site-packages/openai/types/beta/threads/runs/__init__.py create mode 100644 site-packages/openai/types/beta/threads/runs/code_tool_call.py create mode 100644 site-packages/openai/types/beta/threads/runs/function_tool_call.py create mode 100644 site-packages/openai/types/beta/threads/runs/message_creation_step_details.py create mode 100644 site-packages/openai/types/beta/threads/runs/retrieval_tool_call.py create mode 100644 site-packages/openai/types/beta/threads/runs/run_step.py create mode 100644 site-packages/openai/types/beta/threads/runs/step_list_params.py create mode 100644 site-packages/openai/types/beta/threads/runs/tool_calls_step_details.py create mode 100644 site-packages/openai/types/beta/threads/thread_message.py delete mode 100644 site-packages/openai/types/chat/.DS_Store create mode 100644 
site-packages/openai/types/chat/chat_completion_assistant_message_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_content_part_image_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_content_part_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_content_part_text_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_function_call_option_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_function_message_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_message_tool_call.py create mode 100644 site-packages/openai/types/chat/chat_completion_message_tool_call_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_system_message_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_tool_message_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_tool_param.py create mode 100644 site-packages/openai/types/chat/chat_completion_user_message_param.py create mode 100644 site-packages/openai/types/file_list_params.py delete mode 100644 site-packages/openai/types/fine_tuning/.DS_Store create mode 100644 site-packages/openai/types/shared/__init__.py create mode 100644 site-packages/openai/types/shared/function_definition.py create mode 100644 site-packages/openai/types/shared/function_parameters.py create mode 100644 site-packages/openai/types/shared_params/__init__.py create mode 100644 site-packages/openai/types/shared_params/function_definition.py create mode 100644 site-packages/openai/types/shared_params/function_parameters.py delete mode 100644 site-packages/pkg_resources/.DS_Store delete mode 100644 site-packages/pkg_resources/_vendor/.DS_Store delete mode 100644 site-packages/pkg_resources/_vendor/importlib_resources/.DS_Store delete mode 100644 site-packages/pkg_resources/_vendor/jaraco/.DS_Store delete mode 100644 site-packages/pkg_resources/_vendor/jaraco/text/.DS_Store delete mode 100644 site-packages/pkg_resources/_vendor/more_itertools/.DS_Store delete mode 100644 site-packages/pkg_resources/_vendor/packaging/.DS_Store delete mode 100644 site-packages/pkg_resources/_vendor/platformdirs/.DS_Store delete mode 100644 site-packages/pkg_resources/extern/.DS_Store delete mode 100644 site-packages/pydantic/.DS_Store delete mode 100644 site-packages/pygments/.DS_Store delete mode 100644 site-packages/pygments/filters/.DS_Store delete mode 100644 site-packages/pygments/formatters/.DS_Store delete mode 100644 site-packages/pygments/lexers/.DS_Store delete mode 100644 site-packages/pygments/styles/.DS_Store delete mode 100644 site-packages/requests/.DS_Store delete mode 100644 site-packages/rich/.DS_Store delete mode 100644 site-packages/rich_click/.DS_Store delete mode 100644 site-packages/smmap/.DS_Store delete mode 100644 site-packages/smmap/test/.DS_Store delete mode 100644 site-packages/sniffio/.DS_Store delete mode 100644 site-packages/sniffio/_tests/.DS_Store delete mode 100644 site-packages/tiktoken/.DS_Store delete mode 100644 site-packages/tiktoken_ext/.DS_Store delete mode 100644 site-packages/tinydb/.DS_Store delete mode 100644 site-packages/tqdm/.DS_Store delete mode 100644 site-packages/tqdm/contrib/.DS_Store delete mode 100644 site-packages/urllib3/.DS_Store delete mode 100644 
site-packages/urllib3/contrib/.DS_Store delete mode 100644 site-packages/urllib3/contrib/_securetransport/.DS_Store delete mode 100644 site-packages/urllib3/packages/.DS_Store delete mode 100644 site-packages/urllib3/packages/backports/.DS_Store delete mode 100644 site-packages/urllib3/util/.DS_Store delete mode 100644 site-packages/yaml/.DS_Store delete mode 100644 site-packages/zipp/.DS_Store
diff --git a/.DS_Store b/.DS_Store index 0690478edcffb0e25faf2b682548a5c19f3d06ab..fed83821c0217ee334bbabdcb6d50bc3c1fa6ea3 100644 GIT binary patch
diff --git a/site-packages/_distutils_hack/.DS_Store b/python-3.11.6-embed-amd64/.DS_Store similarity index 100% rename from site-packages/_distutils_hack/.DS_Store rename to python-3.11.6-embed-amd64/.DS_Store
diff --git a/site-packages/.DS_Store b/site-packages/.DS_Store index fcb138bc0a4f311f194a23f24ada672a9ac9a874..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 100644 GIT binary patch
diff --git a/site-packages/colorama-0.4.6.dist-info/METADATA b/site-packages/colorama-0.4.6.dist-info/METADATA new file mode 100644 --- /dev/null +++ b/site-packages/colorama-0.4.6.dist-info/METADATA
+License-File: LICENSE.txt +Keywords: ansi,color,colour,crossplatform,terminal,text,windows,xplatform +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Terminals +Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7 +Description-Content-Type: text/x-rst + +.. image:: https://img.shields.io/pypi/v/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Supported Python versions + +..
image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg + :target: https://github.com/tartley/colorama/actions/workflows/test.yml + :alt: Build Status + +Colorama +======== + +Makes ANSI escape character sequences (for producing colored terminal text and +cursor positioning) work under MS Windows. + +.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif + :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama¤cy_code=USD + :alt: Donate with Paypal + +`PyPI for releases `_ | +`Github for source `_ | +`Colorama for enterprise on Tidelift `_ + +If you find Colorama useful, please |donate| to the authors. Thank you! + +Installation +------------ + +Tested on CPython 2.7, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.8. + +No requirements other than the standard library. + +.. code-block:: bash + + pip install colorama + # or + conda install -c anaconda colorama + +Description +----------- + +ANSI escape character sequences have long been used to produce colored terminal +text and cursor positioning on Unix and Macs. Colorama makes this work on +Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which +would appear as gobbledygook in the output), and converting them into the +appropriate win32 calls to modify the state of the terminal. On other platforms, +Colorama does nothing. + +This has the upshot of providing a simple cross-platform API for printing +colored terminal text from Python, and has the happy side-effect that existing +applications or libraries which use ANSI sequences to produce colored output on +Linux or Macs can now also work on Windows, simply by calling +``colorama.just_fix_windows_console()`` (since v0.4.6) or ``colorama.init()`` +(all versions, but may have other side-effects – see below). + +An alternative approach is to install ``ansi.sys`` on Windows machines, which +provides the same behaviour for all applications running in terminals. Colorama +is intended for situations where that isn't easy (e.g., maybe your app doesn't +have an installer.) + +Demo scripts in the source code repository print some colored text using +ANSI sequences. Compare their output under Gnome-terminal's built in ANSI +handling, versus on Windows Command-Prompt using Colorama: + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png + :width: 661 + :height: 357 + :alt: ANSI sequences on Ubuntu under gnome-terminal. + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png + :width: 668 + :height: 325 + :alt: Same ANSI sequences on Windows, using Colorama. + +These screenshots show that, on Windows, Colorama does not support ANSI 'dim +text'; it looks the same as 'normal text'. + +Usage +----- + +Initialisation +.............. + +If the only thing you want from Colorama is to get ANSI escapes to work on +Windows, then run: + +.. code-block:: python + + from colorama import just_fix_windows_console + just_fix_windows_console() + +If you're on a recent version of Windows 10 or better, and your stdout/stderr +are pointing to a Windows console, then this will flip the magic configuration +switch to enable Windows' built-in ANSI support. + +If you're on an older version of Windows, and your stdout/stderr are pointing to +a Windows console, then this will wrap ``sys.stdout`` and/or ``sys.stderr`` in a +magic file object that intercepts ANSI escape sequences and issues the +appropriate Win32 calls to emulate them. 
+ +In all other circumstances, it does nothing whatsoever. Basically the idea is +that this makes Windows act like Unix with respect to ANSI escape handling. + +It's safe to call this function multiple times. It's safe to call this function +on non-Windows platforms, but it won't do anything. It's safe to call this +function when one or both of your stdout/stderr are redirected to a file – it +won't do anything to those streams. + +Alternatively, you can use the older interface with more features (but also more +potential footguns): + +.. code-block:: python + + from colorama import init + init() + +This does the same thing as ``just_fix_windows_console``, except for the +following differences: + +- It's not safe to call ``init`` multiple times; you can end up with multiple + layers of wrapping and broken ANSI support. + +- Colorama will apply a heuristic to guess whether stdout/stderr support ANSI, + and if it thinks they don't, then it will wrap ``sys.stdout`` and + ``sys.stderr`` in a magic file object that strips out ANSI escape sequences + before printing them. This happens on all platforms, and can be convenient if + you want to write your code to emit ANSI escape sequences unconditionally, and + let Colorama decide whether they should actually be output. But note that + Colorama's heuristic is not particularly clever. + +- ``init`` also accepts explicit keyword args to enable/disable various + functionality – see below. + +To stop using Colorama before your program exits, simply call ``deinit()``. +This will restore ``stdout`` and ``stderr`` to their original values, so that +Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is +cheaper than calling ``init()`` again (but does the same thing). + +Most users should depend on ``colorama >= 0.4.6``, and use +``just_fix_windows_console``. The old ``init`` interface will be supported +indefinitely for backwards compatibility, but we don't plan to fix any issues +with it, also for backwards compatibility. + +Colored Output +.............. + +Cross-platform printing of colored text can then be done using Colorama's +constant shorthand for ANSI escape sequences. These are deliberately +rudimentary, see below. + +.. code-block:: python + + from colorama import Fore, Back, Style + print(Fore.RED + 'some red text') + print(Back.GREEN + 'and with a green background') + print(Style.DIM + 'and in dim text') + print(Style.RESET_ALL) + print('back to normal now') + +...or simply by manually printing ANSI sequences from your own code: + +.. code-block:: python + + print('\033[31m' + 'some red text') + print('\033[39m') # and reset to default color + +...or, Colorama can be used in conjunction with existing ANSI libraries +such as the venerable `Termcolor `_ +the fabulous `Blessings `_, +or the incredible `_Rich `_. + +If you wish Colorama's Fore, Back and Style constants were more capable, +then consider using one of the above highly capable libraries to generate +colors, etc, and use Colorama just for its primary purpose: to convert +those ANSI sequences to also work on Windows: + +SIMILARLY, do not send PRs adding the generation of new ANSI types to Colorama. +We are only interested in converting ANSI codes to win32 API calls, not +shortcuts like the above to generate ANSI characters. + +.. 
code-block:: python + + from colorama import just_fix_windows_console + from termcolor import colored + + # use Colorama to make Termcolor work on Windows too + just_fix_windows_console() + + # then use Termcolor for all colored text output + print(colored('Hello, World!', 'green', 'on_red')) + +Available formatting constants are:: + + Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Style: DIM, NORMAL, BRIGHT, RESET_ALL + +``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will +perform this reset automatically on program exit. + +These are fairly well supported, but not part of the standard:: + + Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + +Cursor Positioning +.................. + +ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for +an example of how to generate them. + +Init Keyword Args +................. + +``init()`` accepts some ``**kwargs`` to override default behaviour. + +init(autoreset=False): + If you find yourself repeatedly sending reset sequences to turn off color + changes at the end of every print, then ``init(autoreset=True)`` will + automate that: + + .. code-block:: python + + from colorama import init + init(autoreset=True) + print(Fore.RED + 'some red text') + print('automatically back to default color again') + +init(strip=None): + Pass ``True`` or ``False`` to override whether ANSI codes should be + stripped from the output. The default behaviour is to strip if on Windows + or if output is redirected (not a tty). + +init(convert=None): + Pass ``True`` or ``False`` to override whether to convert ANSI codes in the + output into win32 calls. The default behaviour is to convert if on Windows + and output is to a tty (terminal). + +init(wrap=True): + On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr`` + with proxy objects, which override the ``.write()`` method to do their work. + If this wrapping causes you problems, then this can be disabled by passing + ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or + ``strip`` or ``convert`` are True. + + When wrapping is disabled, colored printing on non-Windows platforms will + continue to work as normal. To do cross-platform colored output, you can + use Colorama's ``AnsiToWin32`` proxy directly: + + .. code-block:: python + + import sys + from colorama import init, AnsiToWin32 + init(wrap=False) + stream = AnsiToWin32(sys.stderr).stream + + # Python 2 + print >>stream, Fore.BLUE + 'blue text on stderr' + + # Python 3 + print(Fore.BLUE + 'blue text on stderr', file=stream) + +Recognised ANSI Sequences +......................... + +ANSI sequences generally take the form:: + + ESC [ ; ... + +Where ```` is an integer, and ```` is a single letter. Zero or +more params are passed to a ````. If no params are passed, it is +generally synonymous with passing a single zero. No spaces exist in the +sequence; they have been inserted here simply to read more easily. 
+ +The only ANSI sequences that Colorama converts into win32 calls are:: + + ESC [ 0 m # reset all (colors and brightness) + ESC [ 1 m # bright + ESC [ 2 m # dim (looks same as normal brightness) + ESC [ 22 m # normal brightness + + # FOREGROUND: + ESC [ 30 m # black + ESC [ 31 m # red + ESC [ 32 m # green + ESC [ 33 m # yellow + ESC [ 34 m # blue + ESC [ 35 m # magenta + ESC [ 36 m # cyan + ESC [ 37 m # white + ESC [ 39 m # reset + + # BACKGROUND + ESC [ 40 m # black + ESC [ 41 m # red + ESC [ 42 m # green + ESC [ 43 m # yellow + ESC [ 44 m # blue + ESC [ 45 m # magenta + ESC [ 46 m # cyan + ESC [ 47 m # white + ESC [ 49 m # reset + + # cursor positioning + ESC [ y;x H # position cursor at x across, y down + ESC [ y;x f # position cursor at x across, y down + ESC [ n A # move cursor n lines up + ESC [ n B # move cursor n lines down + ESC [ n C # move cursor n characters forward + ESC [ n D # move cursor n characters backward + + # clear the screen + ESC [ mode J # clear the screen + + # clear the line + ESC [ mode K # clear the line + +Multiple numeric params to the ``'m'`` command can be combined into a single +sequence:: + + ESC [ 36 ; 45 ; 1 m # bright cyan text on magenta background + +All other ANSI sequences of the form ``ESC [ <params> ; ... <command>`` +are silently stripped from the output on Windows. + +Any other form of ANSI sequence, such as single-character codes or alternative +initial characters, is not recognised or stripped. It would be cool to add +them though. Let me know if it would be useful for you, via the Issues on +GitHub. + +Status & Known Problems +----------------------- + +I've personally only tested it on Windows XP (CMD, Console2), Ubuntu +(gnome-terminal, xterm), and OS X. + +Some valid ANSI sequences aren't recognised. + +If you're hacking on the code, see `README-hacking.md`_. ESPECIALLY, see the +explanation there of why we do not want PRs that allow Colorama to generate new +types of ANSI codes. + +See outstanding issues and wish-list: +https://github.com/tartley/colorama/issues + +If anything doesn't work for you, or doesn't do what you expected or hoped for, +I'd love to hear about it on that issues list, would be delighted by patches, +and would be happy to grant commit access to anyone who submits a working patch +or two. + +.. _README-hacking.md: README-hacking.md + +License +------- + +Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see +LICENSE file. + +Professional support +-------------------- + +.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - Professional support for colorama is available as part of the + `Tidelift Subscription`_. + Tidelift gives software development teams a single source for purchasing + and maintaining their software, with professional grade assurances from + the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +Thanks +------ + +See the CHANGELOG for more thanks! + +* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5. 
+* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``, + providing a solution to issue #7's setuptools/distutils debate, + and other fixes. +* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``. +* Matthew McCormick for politely pointing out a longstanding crash on non-Win. +* Ben Hoyt, for a magnificent fix under 64-bit Windows. +* Jesse at Empty Square for submitting a fix for examples in the README. +* User 'jamessp', an observant documentation fix for cursor positioning. +* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7 + fix. +* Julien Stuyck, for wisely suggesting Python3 compatible updates to README. +* Daniel Griffith for multiple fabulous patches. +* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty + output. +* Roger Binns, for many suggestions, valuable feedback, & bug reports. +* Tim Golden for thought and much appreciated feedback on the initial idea. +* User 'Zearin' for updates to the README file. +* John Szakmeister for adding support for light colors +* Charles Merriam for adding documentation to demos +* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes +* Florian Bruhin for a fix when stdout or stderr are None +* Thomas Weininger for fixing ValueError on Windows +* Remi Rampin for better Github integration and fixes to the README file +* Simeon Visser for closing a file handle using 'with' and updating classifiers + to include Python 3.3 and 3.4 +* Andy Neff for fixing RESET of LIGHT_EX colors. +* Jonathan Hartley for the initial idea and implementation. diff --git a/site-packages/colorama-0.4.6.dist-info/RECORD b/site-packages/colorama-0.4.6.dist-info/RECORD new file mode 100644 index 0000000..b0868a8 --- /dev/null +++ b/site-packages/colorama-0.4.6.dist-info/RECORD @@ -0,0 +1,31 @@ +colorama-0.4.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +colorama-0.4.6.dist-info/METADATA,sha256=e67SnrUMOym9sz_4TjF3vxvAV4T3aF7NyqRHHH3YEMw,17158 +colorama-0.4.6.dist-info/RECORD,, +colorama-0.4.6.dist-info/WHEEL,sha256=cdcF4Fbd0FPtw2EMIOwH-3rSOTUdTCeOSXRMD1iLUb8,105 +colorama-0.4.6.dist-info/licenses/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491 +colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266 +colorama/__pycache__/__init__.cpython-39.pyc,, +colorama/__pycache__/ansi.cpython-39.pyc,, +colorama/__pycache__/ansitowin32.cpython-39.pyc,, +colorama/__pycache__/initialise.cpython-39.pyc,, +colorama/__pycache__/win32.cpython-39.pyc,, +colorama/__pycache__/winterm.cpython-39.pyc,, +colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522 +colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128 +colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325 +colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75 +colorama/tests/__pycache__/__init__.cpython-39.pyc,, +colorama/tests/__pycache__/ansi_test.cpython-39.pyc,, +colorama/tests/__pycache__/ansitowin32_test.cpython-39.pyc,, +colorama/tests/__pycache__/initialise_test.cpython-39.pyc,, +colorama/tests/__pycache__/isatty_test.cpython-39.pyc,, +colorama/tests/__pycache__/utils.cpython-39.pyc,, +colorama/tests/__pycache__/winterm_test.cpython-39.pyc,, +colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839 +colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678 
+colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741 +colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866 +colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079 +colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709 +colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181 +colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134 diff --git a/site-packages/importlib_resources-6.1.0.dist-info/WHEEL b/site-packages/colorama-0.4.6.dist-info/WHEEL similarity index 56% rename from site-packages/importlib_resources-6.1.0.dist-info/WHEEL rename to site-packages/colorama-0.4.6.dist-info/WHEEL index 7e68873..d79189f 100644 --- a/site-packages/importlib_resources-6.1.0.dist-info/WHEEL +++ b/site-packages/colorama-0.4.6.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.41.2) +Generator: hatchling 1.11.1 Root-Is-Purelib: true +Tag: py2-none-any Tag: py3-none-any - diff --git a/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt b/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..3105888 --- /dev/null +++ b/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2010 Jonathan Hartley +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holders, nor those of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/site-packages/colorama/__init__.py b/site-packages/colorama/__init__.py new file mode 100644 index 0000000..383101c --- /dev/null +++ b/site-packages/colorama/__init__.py @@ -0,0 +1,7 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.4.6' + diff --git a/site-packages/colorama/ansi.py b/site-packages/colorama/ansi.py new file mode 100644 index 0000000..11ec695 --- /dev/null +++ b/site-packages/colorama/ansi.py @@ -0,0 +1,102 @@ +# Copyright Jonathan Hartley 2013. 
BSD 3-Clause license, see LICENSE file. +''' +This module generates ANSI character codes to printing colors to terminals. +See: http://en.wikipedia.org/wiki/ANSI_escape_code +''' + +CSI = '\033[' +OSC = '\033]' +BEL = '\a' + + +def code_to_chars(code): + return CSI + str(code) + 'm' + +def set_title(title): + return OSC + '2;' + title + BEL + +def clear_screen(mode=2): + return CSI + str(mode) + 'J' + +def clear_line(mode=2): + return CSI + str(mode) + 'K' + + +class AnsiCodes(object): + def __init__(self): + # the subclasses declare class attributes which are numbers. + # Upon instantiation we define instance attributes, which are the same + # as the class attributes but wrapped with the ANSI escape sequence + for name in dir(self): + if not name.startswith('_'): + value = getattr(self, name) + setattr(self, name, code_to_chars(value)) + + +class AnsiCursor(object): + def UP(self, n=1): + return CSI + str(n) + 'A' + def DOWN(self, n=1): + return CSI + str(n) + 'B' + def FORWARD(self, n=1): + return CSI + str(n) + 'C' + def BACK(self, n=1): + return CSI + str(n) + 'D' + def POS(self, x=1, y=1): + return CSI + str(y) + ';' + str(x) + 'H' + + +class AnsiFore(AnsiCodes): + BLACK = 30 + RED = 31 + GREEN = 32 + YELLOW = 33 + BLUE = 34 + MAGENTA = 35 + CYAN = 36 + WHITE = 37 + RESET = 39 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 90 + LIGHTRED_EX = 91 + LIGHTGREEN_EX = 92 + LIGHTYELLOW_EX = 93 + LIGHTBLUE_EX = 94 + LIGHTMAGENTA_EX = 95 + LIGHTCYAN_EX = 96 + LIGHTWHITE_EX = 97 + + +class AnsiBack(AnsiCodes): + BLACK = 40 + RED = 41 + GREEN = 42 + YELLOW = 43 + BLUE = 44 + MAGENTA = 45 + CYAN = 46 + WHITE = 47 + RESET = 49 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 100 + LIGHTRED_EX = 101 + LIGHTGREEN_EX = 102 + LIGHTYELLOW_EX = 103 + LIGHTBLUE_EX = 104 + LIGHTMAGENTA_EX = 105 + LIGHTCYAN_EX = 106 + LIGHTWHITE_EX = 107 + + +class AnsiStyle(AnsiCodes): + BRIGHT = 1 + DIM = 2 + NORMAL = 22 + RESET_ALL = 0 + +Fore = AnsiFore() +Back = AnsiBack() +Style = AnsiStyle() +Cursor = AnsiCursor() diff --git a/site-packages/colorama/ansitowin32.py b/site-packages/colorama/ansitowin32.py new file mode 100644 index 0000000..abf209e --- /dev/null +++ b/site-packages/colorama/ansitowin32.py @@ -0,0 +1,277 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import re +import sys +import os + +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL +from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle +from .win32 import windll, winapi_test + + +winterm = None +if windll is not None: + winterm = WinTerm() + + +class StreamWrapper(object): + ''' + Wraps a stream (such as stdout), acting as a transparent proxy for all + attribute access apart from method 'write()', which is delegated to our + Converter instance. + ''' + def __init__(self, wrapped, converter): + # double-underscore everything to prevent clashes with names of + # attributes on the wrapped stream object. 
+ self.__wrapped = wrapped + self.__convertor = converter + + def __getattr__(self, name): + return getattr(self.__wrapped, name) + + def __enter__(self, *args, **kwargs): + # special method lookup bypasses __getattr__/__getattribute__, see + # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit + # thus, contextlib magic methods are not proxied via __getattr__ + return self.__wrapped.__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + return self.__wrapped.__exit__(*args, **kwargs) + + def __setstate__(self, state): + self.__dict__ = state + + def __getstate__(self): + return self.__dict__ + + def write(self, text): + self.__convertor.write(text) + + def isatty(self): + stream = self.__wrapped + if 'PYCHARM_HOSTED' in os.environ: + if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): + return True + try: + stream_isatty = stream.isatty + except AttributeError: + return False + else: + return stream_isatty() + + @property + def closed(self): + stream = self.__wrapped + try: + return stream.closed + # AttributeError in the case that the stream doesn't support being closed + # ValueError for the case that the stream has already been detached when atexit runs + except (AttributeError, ValueError): + return True + + +class AnsiToWin32(object): + ''' + Implements a 'write()' method which, on Windows, will strip ANSI character + sequences from the text, and if outputting to a tty, will convert them into + win32 function calls. + ''' + ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer + ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command + + def __init__(self, wrapped, convert=None, strip=None, autoreset=False): + # The wrapped stream (normally sys.stdout or sys.stderr) + self.wrapped = wrapped + + # should we reset colors to defaults after every .write() + self.autoreset = autoreset + + # create the proxy wrapping our output stream + self.stream = StreamWrapper(wrapped, self) + + on_windows = os.name == 'nt' + # We test if the WinAPI works, because even if we are on Windows + # we may be using a terminal that doesn't support the WinAPI + # (e.g. Cygwin Terminal). In this case it's up to the terminal + # to support the ANSI codes. + conversion_supported = on_windows and winapi_test() + try: + fd = wrapped.fileno() + except Exception: + fd = -1 + system_has_native_ansi = not on_windows or enable_vt_processing(fd) + have_tty = not self.stream.closed and self.stream.isatty() + need_conversion = conversion_supported and not system_has_native_ansi + + # should we strip ANSI sequences from our output? + if strip is None: + strip = need_conversion or not have_tty + self.strip = strip + + # should we should convert ANSI sequences into win32 calls? + if convert is None: + convert = need_conversion and have_tty + self.convert = convert + + # dict of ansi codes to win32 functions and parameters + self.win32_calls = self.get_win32_calls() + + # are we wrapping stderr? + self.on_stderr = self.wrapped is sys.stderr + + def should_wrap(self): + ''' + True if this class is actually needed. If false, then the output + stream will not be affected, nor will win32 calls be issued, so + wrapping stdout is not actually required. 
This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not self.stream.closed: + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. 
+ ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command == BEL: + if paramstring.count(";") == 1: + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text + + + def flush(self): + self.wrapped.flush() diff --git a/site-packages/colorama/initialise.py b/site-packages/colorama/initialise.py new file mode 100644 index 0000000..d5fd4b7 --- /dev/null +++ b/site-packages/colorama/initialise.py @@ -0,0 +1,121 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +def _wipe_internal_state_for_tests(): + global orig_stdout, orig_stderr + orig_stdout = None + orig_stderr = None + + global wrapped_stdout, wrapped_stderr + wrapped_stdout = None + wrapped_stderr = None + + global atexit_done + atexit_done = False + + global fixed_windows_console + fixed_windows_console = False + + try: + # no-op if it wasn't registered + atexit.unregister(reset_all) + except AttributeError: + # python 2: no atexit.unregister. Oh well, we did our best. 
+ pass + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +def just_fix_windows_console(): + global fixed_windows_console + + if sys.platform != "win32": + return + if fixed_windows_console: + return + if wrapped_stdout is not None or wrapped_stderr is not None: + # Someone already ran init() and it did stuff, so we won't second-guess them + return + + # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the + # native ANSI support in the console as a side-effect. We only need to actually + # replace sys.stdout/stderr if we're in the old-style conversion mode. + new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) + if new_stdout.convert: + sys.stdout = new_stdout + new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) + if new_stderr.convert: + sys.stderr = new_stderr + + fixed_windows_console = True + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream + + +# Use this for initial setup as well, to reduce code duplication +_wipe_internal_state_for_tests() diff --git a/site-packages/colorama/tests/__init__.py b/site-packages/colorama/tests/__init__.py new file mode 100644 index 0000000..8c5661e --- /dev/null +++ b/site-packages/colorama/tests/__init__.py @@ -0,0 +1 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. diff --git a/site-packages/colorama/tests/ansi_test.py b/site-packages/colorama/tests/ansi_test.py new file mode 100644 index 0000000..0a20c80 --- /dev/null +++ b/site-packages/colorama/tests/ansi_test.py @@ -0,0 +1,76 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansi import Back, Fore, Style +from ..ansitowin32 import AnsiToWin32 + +stdout_orig = sys.stdout +stderr_orig = sys.stderr + + +class AnsiTest(TestCase): + + def setUp(self): + # sanity check: stdout should be a file or StringIO object. 
+ # It will only be AnsiToWin32 if init() has previously wrapped it + self.assertNotEqual(type(sys.stdout), AnsiToWin32) + self.assertNotEqual(type(sys.stderr), AnsiToWin32) + + def tearDown(self): + sys.stdout = stdout_orig + sys.stderr = stderr_orig + + + def testForeAttributes(self): + self.assertEqual(Fore.BLACK, '\033[30m') + self.assertEqual(Fore.RED, '\033[31m') + self.assertEqual(Fore.GREEN, '\033[32m') + self.assertEqual(Fore.YELLOW, '\033[33m') + self.assertEqual(Fore.BLUE, '\033[34m') + self.assertEqual(Fore.MAGENTA, '\033[35m') + self.assertEqual(Fore.CYAN, '\033[36m') + self.assertEqual(Fore.WHITE, '\033[37m') + self.assertEqual(Fore.RESET, '\033[39m') + + # Check the light, extended versions. + self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m') + self.assertEqual(Fore.LIGHTRED_EX, '\033[91m') + self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m') + self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m') + self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m') + self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m') + self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m') + self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m') + + + def testBackAttributes(self): + self.assertEqual(Back.BLACK, '\033[40m') + self.assertEqual(Back.RED, '\033[41m') + self.assertEqual(Back.GREEN, '\033[42m') + self.assertEqual(Back.YELLOW, '\033[43m') + self.assertEqual(Back.BLUE, '\033[44m') + self.assertEqual(Back.MAGENTA, '\033[45m') + self.assertEqual(Back.CYAN, '\033[46m') + self.assertEqual(Back.WHITE, '\033[47m') + self.assertEqual(Back.RESET, '\033[49m') + + # Check the light, extended versions. + self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m') + self.assertEqual(Back.LIGHTRED_EX, '\033[101m') + self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m') + self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m') + self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m') + self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m') + self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m') + self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m') + + + def testStyleAttributes(self): + self.assertEqual(Style.DIM, '\033[2m') + self.assertEqual(Style.NORMAL, '\033[22m') + self.assertEqual(Style.BRIGHT, '\033[1m') + + +if __name__ == '__main__': + main() diff --git a/site-packages/colorama/tests/ansitowin32_test.py b/site-packages/colorama/tests/ansitowin32_test.py new file mode 100644 index 0000000..91ca551 --- /dev/null +++ b/site-packages/colorama/tests/ansitowin32_test.py @@ -0,0 +1,294 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+from io import StringIO, TextIOWrapper +from unittest import TestCase, main +try: + from contextlib import ExitStack +except ImportError: + # python 2 + from contextlib2 import ExitStack + +try: + from unittest.mock import MagicMock, Mock, patch +except ImportError: + from mock import MagicMock, Mock, patch + +from ..ansitowin32 import AnsiToWin32, StreamWrapper +from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING +from .utils import osname + + +class StreamWrapperTest(TestCase): + + def testIsAProxy(self): + mockStream = Mock() + wrapper = StreamWrapper(mockStream, None) + self.assertTrue( wrapper.random_attr is mockStream.random_attr ) + + def testDelegatesWrite(self): + mockStream = Mock() + mockConverter = Mock() + wrapper = StreamWrapper(mockStream, mockConverter) + wrapper.write('hello') + self.assertTrue(mockConverter.write.call_args, (('hello',), {})) + + def testDelegatesContext(self): + mockConverter = Mock() + s = StringIO() + with StreamWrapper(s, mockConverter) as fp: + fp.write(u'hello') + self.assertTrue(s.closed) + + def testProxyNoContextManager(self): + mockStream = MagicMock() + mockStream.__enter__.side_effect = AttributeError() + mockConverter = Mock() + with self.assertRaises(AttributeError) as excinfo: + with StreamWrapper(mockStream, mockConverter) as wrapper: + wrapper.write('hello') + + def test_closed_shouldnt_raise_on_closed_stream(self): + stream = StringIO() + stream.close() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) + + def test_closed_shouldnt_raise_on_detached_stream(self): + stream = TextIOWrapper(StringIO()) + stream.detach() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) + +class AnsiToWin32Test(TestCase): + + def testInit(self): + mockStdout = Mock() + auto = Mock() + stream = AnsiToWin32(mockStdout, autoreset=auto) + self.assertEqual(stream.wrapped, mockStdout) + self.assertEqual(stream.autoreset, auto) + + @patch('colorama.ansitowin32.winterm', None) + @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + def testStripIsTrueOnWindows(self): + with osname('nt'): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + self.assertTrue(stream.strip) + + def testStripIsFalseOffWindows(self): + with osname('posix'): + mockStdout = Mock(closed=False) + stream = AnsiToWin32(mockStdout) + self.assertFalse(stream.strip) + + def testWriteStripsAnsi(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + stream.wrapped = Mock() + stream.write_and_convert = Mock() + stream.strip = True + + stream.write('abc') + + self.assertFalse(stream.wrapped.write.called) + self.assertEqual(stream.write_and_convert.call_args, (('abc',), {})) + + def testWriteDoesNotStripAnsi(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + stream.wrapped = Mock() + stream.write_and_convert = Mock() + stream.strip = False + stream.convert = False + + stream.write('abc') + + self.assertFalse(stream.write_and_convert.called) + self.assertEqual(stream.wrapped.write.call_args, (('abc',), {})) + + def assert_autoresets(self, convert, autoreset=True): + stream = AnsiToWin32(Mock()) + stream.convert = convert + stream.reset_all = Mock() + stream.autoreset = autoreset + stream.winterm = Mock() + + stream.write('abc') + + self.assertEqual(stream.reset_all.called, autoreset) + + def testWriteAutoresets(self): + self.assert_autoresets(convert=True) + self.assert_autoresets(convert=False) + self.assert_autoresets(convert=True, autoreset=False) + self.assert_autoresets(convert=False, 
autoreset=False) + + def testWriteAndConvertWritesPlainText(self): + stream = AnsiToWin32(Mock()) + stream.write_and_convert( 'abc' ) + self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) ) + + def testWriteAndConvertStripsAllValidAnsi(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + data = [ + 'abc\033[mdef', + 'abc\033[0mdef', + 'abc\033[2mdef', + 'abc\033[02mdef', + 'abc\033[002mdef', + 'abc\033[40mdef', + 'abc\033[040mdef', + 'abc\033[0;1mdef', + 'abc\033[40;50mdef', + 'abc\033[50;30;40mdef', + 'abc\033[Adef', + 'abc\033[0Gdef', + 'abc\033[1;20;128Hdef', + ] + for datum in data: + stream.wrapped.write.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( + [args[0] for args in stream.wrapped.write.call_args_list], + [ ('abc',), ('def',) ] + ) + + def testWriteAndConvertSkipsEmptySnippets(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + stream.write_and_convert( '\033[40m\033[41m' ) + self.assertFalse( stream.wrapped.write.called ) + + def testWriteAndConvertCallsWin32WithParamsAndCommand(self): + stream = AnsiToWin32(Mock()) + stream.convert = True + stream.call_win32 = Mock() + stream.extract_params = Mock(return_value='params') + data = { + 'abc\033[adef': ('a', 'params'), + 'abc\033[;;bdef': ('b', 'params'), + 'abc\033[0cdef': ('c', 'params'), + 'abc\033[;;0;;Gdef': ('G', 'params'), + 'abc\033[1;20;128Hdef': ('H', 'params'), + } + for datum, expected in data.items(): + stream.call_win32.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( stream.call_win32.call_args[0], expected ) + + def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + converter = AnsiToWin32(stream) + stream.close() + + converter.reset_all() + + def test_wrap_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + stream.close() + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(stream) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def test_wrap_shouldnt_raise_on_missing_closed_attr(self): + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(object()) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def testExtractParams(self): + stream = AnsiToWin32(Mock()) + data = { + '': (0,), + ';;': (0,), + '2': (2,), + ';;002;;': (2,), + '0;1': (0, 1), + ';;003;;456;;': (3, 456), + '11;22;33;44;55': (11, 22, 33, 44, 55), + } + for datum, expected in data.items(): + self.assertEqual(stream.extract_params('m', datum), expected) + + def testCallWin32UsesLookup(self): + listener = Mock() + stream = AnsiToWin32(listener) + stream.win32_calls = { + 1: (lambda *_, **__: listener(11),), + 2: (lambda *_, **__: listener(22),), + 3: (lambda *_, **__: listener(33),), + } + stream.call_win32('m', (3, 1, 99, 2)) + self.assertEqual( + [a[0][0] for a in listener.call_args_list], + [33, 11, 22] ) + + def test_osc_codes(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout, convert=True) + with patch('colorama.ansitowin32.winterm') as winterm: + data = [ + '\033]0\x07', # missing arguments + '\033]0;foo\x08', # wrong OSC command + '\033]0;colorama_test_title\x07', # should work + '\033]1;colorama_test_title\x07', # wrong set command + '\033]2;colorama_test_title\x07', # should work + '\033]' + ';' * 64 + '\x08', # see issue #247 + ] + for code in data: + 
stream.write(code) + self.assertEqual(winterm.set_title.call_count, 2) + + def test_native_windows_ansi(self): + with ExitStack() as stack: + def p(a, b): + stack.enter_context(patch(a, b, create=True)) + # Pretend to be on Windows + p("colorama.ansitowin32.os.name", "nt") + p("colorama.ansitowin32.winapi_test", lambda: True) + p("colorama.win32.winapi_test", lambda: True) + p("colorama.winterm.win32.windll", "non-None") + p("colorama.winterm.get_osfhandle", lambda _: 1234) + + # Pretend that our mock stream has native ANSI support + p( + "colorama.winterm.win32.GetConsoleMode", + lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = True + stdout.fileno.return_value = 1 + + # Our fake console says it has native vt support, so AnsiToWin32 should + # enable that support and do nothing else. + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertFalse(stream.strip) + self.assertFalse(stream.convert) + self.assertFalse(stream.should_wrap()) + + # Now let's pretend we're on an old Windows console, that doesn't have + # native ANSI support. + p("colorama.winterm.win32.GetConsoleMode", lambda _: 0) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertTrue(stream.strip) + self.assertTrue(stream.convert) + self.assertTrue(stream.should_wrap()) + + +if __name__ == '__main__': + main() diff --git a/site-packages/colorama/tests/initialise_test.py b/site-packages/colorama/tests/initialise_test.py new file mode 100644 index 0000000..89f9b07 --- /dev/null +++ b/site-packages/colorama/tests/initialise_test.py @@ -0,0 +1,189 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import patch, Mock +except ImportError: + from mock import patch, Mock + +from ..ansitowin32 import StreamWrapper +from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests +from .utils import osname, replace_by + +orig_stdout = sys.stdout +orig_stderr = sys.stderr + + +class InitTest(TestCase): + + @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty") + def setUp(self): + # sanity check + self.assertNotWrapped() + + def tearDown(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def assertWrapped(self): + self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped') + self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped') + self.assertTrue(isinstance(sys.stdout, StreamWrapper), + 'bad stdout wrapper') + self.assertTrue(isinstance(sys.stderr, StreamWrapper), + 'bad stderr wrapper') + + def assertNotWrapped(self): + self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped') + self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped') + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False) + def testInitWrapsOnWindows(self, _): + with osname("nt"): + init() + self.assertWrapped() + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: False) + def testInitDoesntWrapOnEmulatedWindows(self, _): + with osname("nt"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapOnNonWindows(self): + with osname("posix"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapIfNone(self): + with replace_by(None): + init() + # We can't use assertNotWrapped here because replace_by(None) + # changes stdout/stderr already. 
+ self.assertIsNone(sys.stdout) + self.assertIsNone(sys.stderr) + + def testInitAutoresetOnWrapsOnAllPlatforms(self): + with osname("posix"): + init(autoreset=True) + self.assertWrapped() + + def testInitWrapOffDoesntWrapOnWindows(self): + with osname("nt"): + init(wrap=False) + self.assertNotWrapped() + + def testInitWrapOffIncompatibleWithAutoresetOn(self): + self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False)) + + @patch('colorama.win32.SetConsoleTextAttribute') + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetPassedOn(self, mockATW32, _): + with osname("nt"): + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 2) + self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True) + + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetChangeable(self, mockATW32): + with osname("nt"): + init() + + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 4) + self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True) + + init() + self.assertEqual(len(mockATW32.call_args_list), 6) + self.assertEqual( + mockATW32.call_args_list[4][1]['autoreset'], False) + self.assertEqual( + mockATW32.call_args_list[5][1]['autoreset'], False) + + + @patch('colorama.initialise.atexit.register') + def testAtexitRegisteredOnlyOnce(self, mockRegister): + init() + self.assertTrue(mockRegister.called) + mockRegister.reset_mock() + init() + self.assertFalse(mockRegister.called) + + +class JustFixWindowsConsoleTest(TestCase): + def _reset(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def tearDown(self): + self._reset() + + @patch("colorama.ansitowin32.winapi_test", lambda: True) + def testJustFixWindowsConsole(self): + if sys.platform != "win32": + # just_fix_windows_console should be a no-op + just_fix_windows_console() + self.assertIs(sys.stdout, orig_stdout) + self.assertIs(sys.stderr, orig_stderr) + else: + def fake_std(): + # Emulate stdout=not a tty, stderr=tty + # to check that we handle both cases correctly + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = False + stdout.fileno.return_value = 1 + sys.stdout = stdout + + stderr = Mock() + stderr.closed = False + stderr.isatty.return_value = True + stderr.fileno.return_value = 2 + sys.stderr = stderr + + for native_ansi in [False, True]: + with patch( + 'colorama.ansitowin32.enable_vt_processing', + lambda *_: native_ansi + ): + self._reset() + fake_std() + + # Regular single-call test + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + if native_ansi: + self.assertIs(sys.stderr, prev_stderr) + else: + self.assertIsNot(sys.stderr, prev_stderr) + + # second call without resetting is always a no-op + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + self.assertIs(sys.stderr, prev_stderr) + + self._reset() + fake_std() + + # If init() runs first, just_fix_windows_console should be a no-op + init() + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(prev_stdout, sys.stdout) + self.assertIs(prev_stderr, sys.stderr) + + +if __name__ == '__main__': + main() diff --git a/site-packages/colorama/tests/isatty_test.py b/site-packages/colorama/tests/isatty_test.py new file mode 
100644 index 0000000..0f84e4b --- /dev/null +++ b/site-packages/colorama/tests/isatty_test.py @@ -0,0 +1,57 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansitowin32 import StreamWrapper, AnsiToWin32 +from .utils import pycharm, replace_by, replace_original_by, StreamTTY, StreamNonTTY + + +def is_a_tty(stream): + return StreamWrapper(stream, None).isatty() + +class IsattyTest(TestCase): + + def test_TTY(self): + tty = StreamTTY() + self.assertTrue(is_a_tty(tty)) + with pycharm(): + self.assertTrue(is_a_tty(tty)) + + def test_nonTTY(self): + non_tty = StreamNonTTY() + self.assertFalse(is_a_tty(non_tty)) + with pycharm(): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharm(self): + with pycharm(): + self.assertTrue(is_a_tty(sys.stderr)) + self.assertTrue(is_a_tty(sys.stdout)) + + def test_withPycharmTTYOverride(self): + tty = StreamTTY() + with pycharm(), replace_by(tty): + self.assertTrue(is_a_tty(tty)) + + def test_withPycharmNonTTYOverride(self): + non_tty = StreamNonTTY() + with pycharm(), replace_by(non_tty): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharmNoneOverride(self): + with pycharm(): + with replace_by(None), replace_original_by(None): + self.assertFalse(is_a_tty(None)) + self.assertFalse(is_a_tty(StreamNonTTY())) + self.assertTrue(is_a_tty(StreamTTY())) + + def test_withPycharmStreamWrapped(self): + with pycharm(): + self.assertTrue(AnsiToWin32(StreamTTY()).stream.isatty()) + self.assertFalse(AnsiToWin32(StreamNonTTY()).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stdout).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stderr).stream.isatty()) + + +if __name__ == '__main__': + main() diff --git a/site-packages/colorama/tests/utils.py b/site-packages/colorama/tests/utils.py new file mode 100644 index 0000000..472fafb --- /dev/null +++ b/site-packages/colorama/tests/utils.py @@ -0,0 +1,49 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from contextlib import contextmanager +from io import StringIO +import sys +import os + + +class StreamTTY(StringIO): + def isatty(self): + return True + +class StreamNonTTY(StringIO): + def isatty(self): + return False + +@contextmanager +def osname(name): + orig = os.name + os.name = name + yield + os.name = orig + +@contextmanager +def replace_by(stream): + orig_stdout = sys.stdout + orig_stderr = sys.stderr + sys.stdout = stream + sys.stderr = stream + yield + sys.stdout = orig_stdout + sys.stderr = orig_stderr + +@contextmanager +def replace_original_by(stream): + orig_stdout = sys.__stdout__ + orig_stderr = sys.__stderr__ + sys.__stdout__ = stream + sys.__stderr__ = stream + yield + sys.__stdout__ = orig_stdout + sys.__stderr__ = orig_stderr + +@contextmanager +def pycharm(): + os.environ["PYCHARM_HOSTED"] = "1" + non_tty = StreamNonTTY() + with replace_by(non_tty), replace_original_by(non_tty): + yield + del os.environ["PYCHARM_HOSTED"] diff --git a/site-packages/colorama/tests/winterm_test.py b/site-packages/colorama/tests/winterm_test.py new file mode 100644 index 0000000..d0955f9 --- /dev/null +++ b/site-packages/colorama/tests/winterm_test.py @@ -0,0 +1,131 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import Mock, patch +except ImportError: + from mock import Mock, patch + +from ..winterm import WinColor, WinStyle, WinTerm + + +class WinTermTest(TestCase): + + @patch('colorama.winterm.win32') + def testInit(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 7 + 6 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + self.assertEqual(term._fore, 7) + self.assertEqual(term._back, 6) + self.assertEqual(term._style, 8) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testGetAttrs(self): + term = WinTerm() + + term._fore = 0 + term._back = 0 + term._style = 0 + self.assertEqual(term.get_attrs(), 0) + + term._fore = WinColor.YELLOW + self.assertEqual(term.get_attrs(), WinColor.YELLOW) + + term._back = WinColor.MAGENTA + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16) + + term._style = WinStyle.BRIGHT + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT) + + @patch('colorama.winterm.win32') + def testResetAll(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 1 + 2 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + + term.set_console = Mock() + term._fore = -1 + term._back = -1 + term._style = -1 + + term.reset_all() + + self.assertEqual(term._fore, 1) + self.assertEqual(term._back, 2) + self.assertEqual(term._style, 8) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testFore(self): + term = WinTerm() + term.set_console = Mock() + term._fore = 0 + + term.fore(5) + + self.assertEqual(term._fore, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testBack(self): + term = WinTerm() + term.set_console = Mock() + term._back = 0 + + term.back(5) + + self.assertEqual(term._back, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testStyle(self): + term = WinTerm() + term.set_console = Mock() + term._style = 0 + + term.style(22) + + self.assertEqual(term._style, 22) + self.assertEqual(term.set_console.called, True) + + @patch('colorama.winterm.win32') + def testSetConsole(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console() + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDOUT, term.get_attrs()), {}) + ) + + @patch('colorama.winterm.win32') + def testSetConsoleOnStderr(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console(on_stderr=True) + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDERR, term.get_attrs()), {}) + ) + + +if __name__ == '__main__': + main() diff --git a/site-packages/colorama/win32.py b/site-packages/colorama/win32.py new file mode 100644 index 0000000..841b0e2 --- /dev/null +++ b/site-packages/colorama/win32.py @@ -0,0 +1,180 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+ +# from winbase.h +STDOUT = -11 +STDERR = -12 + +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW + _SetConsoleTitleW.argtypes = [ + wintypes.LPCWSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + _GetConsoleMode = windll.kernel32.GetConsoleMode + _GetConsoleMode.argtypes = [ + wintypes.HANDLE, + POINTER(wintypes.DWORD) + ] + _GetConsoleMode.restype = wintypes.BOOL + + _SetConsoleMode = windll.kernel32.SetConsoleMode + _SetConsoleMode.argtypes = [ + wintypes.HANDLE, + wintypes.DWORD + ] + _SetConsoleMode.restype = wintypes.BOOL + + def _winapi_test(handle): + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def winapi_test(): + return any(_winapi_test(h) for h in + (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = _GetStdHandle(stream_id) + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = _GetStdHandle(stream_id) + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the 
position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). + adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = _GetStdHandle(stream_id) + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = _GetStdHandle(stream_id) + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = _GetStdHandle(stream_id) + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) + + def GetConsoleMode(handle): + mode = wintypes.DWORD() + success = _GetConsoleMode(handle, byref(mode)) + if not success: + raise ctypes.WinError() + return mode.value + + def SetConsoleMode(handle, mode): + success = _SetConsoleMode(handle, mode) + if not success: + raise ctypes.WinError() diff --git a/site-packages/colorama/winterm.py b/site-packages/colorama/winterm.py new file mode 100644 index 0000000..aad867e --- /dev/null +++ b/site-packages/colorama/winterm.py @@ -0,0 +1,195 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +try: + from msvcrt import get_osfhandle +except ImportError: + def get_osfhandle(_): + raise OSError("This isn't windows!") + + +from . import win32 + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. 
+ self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + self._light = 0 + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. 
+ # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + elif mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + elif mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) + + +def enable_vt_processing(fd): + if win32.windll is None or not win32.winapi_test(): + return False + + try: + handle = get_osfhandle(fd) + mode = win32.GetConsoleMode(handle) + win32.SetConsoleMode( + handle, + mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + + mode = win32.GetConsoleMode(handle) + if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING: + return True + # Can get TypeError in testsuite where 'fd' is a Mock() + except (OSError, TypeError): + return False diff --git a/site-packages/httpcore-0.18.0.dist-info/INSTALLER b/site-packages/devchat-0.2.10.dist-info/INSTALLER similarity index 100% rename from site-packages/httpcore-0.18.0.dist-info/INSTALLER rename to site-packages/devchat-0.2.10.dist-info/INSTALLER diff --git a/site-packages/devchat-0.2.9.dist-info/LICENSE b/site-packages/devchat-0.2.10.dist-info/LICENSE similarity index 100% rename from site-packages/devchat-0.2.9.dist-info/LICENSE rename to site-packages/devchat-0.2.10.dist-info/LICENSE diff --git a/site-packages/devchat-0.2.9.dist-info/METADATA b/site-packages/devchat-0.2.10.dist-info/METADATA similarity index 99% rename from site-packages/devchat-0.2.9.dist-info/METADATA rename to site-packages/devchat-0.2.10.dist-info/METADATA index 1595fa9..71845be 100644 --- a/site-packages/devchat-0.2.9.dist-info/METADATA +++ b/site-packages/devchat-0.2.10.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: devchat -Version: 0.2.9 
+Version: 0.2.10 Summary: DevChat is an open-source tool that helps developers write prompts to generate code and documentation. Home-page: https://github.com/devchat-ai/devchat License: Apache-2.0 @@ -20,12 +20,13 @@ Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence Classifier: Topic :: Software Development +Requires-Dist: colorama (>=0.4.6,<0.5.0) Requires-Dist: gitpython (>=3.1.32,<4.0.0) Requires-Dist: importlib-metadata (>=6.8.0,<7.0.0) Requires-Dist: networkx (>=3.1,<4.0) Requires-Dist: openai (>=1.0rc,<2.0) Requires-Dist: oyaml (>=1.0,<2.0) -Requires-Dist: pydantic (>=1.10.7,<2.0.0) +Requires-Dist: pydantic (==1.10.13) Requires-Dist: rich_click (>=1.6.1,<2.0.0) Requires-Dist: tiktoken (>=0.4.0,<0.5.0) Requires-Dist: tinydb (>=4.7.1,<5.0.0) diff --git a/site-packages/devchat-0.2.9.dist-info/RECORD b/site-packages/devchat-0.2.10.dist-info/RECORD similarity index 65% rename from site-packages/devchat-0.2.9.dist-info/RECORD rename to site-packages/devchat-0.2.10.dist-info/RECORD index ca0c1ee..03bf30d 100644 --- a/site-packages/devchat-0.2.9.dist-info/RECORD +++ b/site-packages/devchat-0.2.10.dist-info/RECORD @@ -1,14 +1,14 @@ -../../../bin/devchat,sha256=vIdrM1HVCLtIIPguIbzu4QV0CKMZvbRJqEBeLF9xyXE,247 -devchat-0.2.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -devchat-0.2.9.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 -devchat-0.2.9.dist-info/METADATA,sha256=WJiWT9Wr22Ze88H5U3Y_QtXhoveDeXuhlYNdB86-iT0,9739 -devchat-0.2.9.dist-info/RECORD,, -devchat-0.2.9.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -devchat-0.2.9.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88 -devchat-0.2.9.dist-info/direct_url.json,sha256=zd3JCXQPIQyScA8lV44s-V6X6GM2woo8TWVYFvT3e9Y,59 -devchat-0.2.9.dist-info/entry_points.txt,sha256=Glu9CHUNBjYbZXTQc3YW2rZFr2S_3AvqKu50gj3aWT4,50 +../../../bin/devchat,sha256=suM8Tlq7_32mdUbhBivdAeg9W---BD14D3E7bhy9LOc,247 +devchat-0.2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +devchat-0.2.10.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +devchat-0.2.10.dist-info/METADATA,sha256=NxGPR5qcRawRtREwinMGazaQIaefdeOOui9A60ynYo0,9775 +devchat-0.2.10.dist-info/RECORD,, +devchat-0.2.10.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +devchat-0.2.10.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88 +devchat-0.2.10.dist-info/direct_url.json,sha256=zd3JCXQPIQyScA8lV44s-V6X6GM2woo8TWVYFvT3e9Y,59 +devchat-0.2.10.dist-info/entry_points.txt,sha256=Glu9CHUNBjYbZXTQc3YW2rZFr2S_3AvqKu50gj3aWT4,50 devchat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -devchat/__main__.py,sha256=KOQA2JI9HBmG32X3K8Uisa_tmkIn_gT9z1QPSnxzAOs,109 +devchat/__main__.py,sha256=RY7_u5N5S0Ye2YtBWeYkk9n8zia_z9oe3DnS1SHRxZA,110 devchat/__pycache__/__init__.cpython-39.pyc,, devchat/__pycache__/__main__.cpython-39.pyc,, devchat/__pycache__/assistant.cpython-39.pyc,, @@ -29,14 +29,14 @@ devchat/_cli/__pycache__/utils.cpython-39.pyc,, devchat/_cli/log.py,sha256=GAYVrNPprDQEB6G2Z1J97jjDU-EbYlJBHnz-Lz6nzvo,3106 devchat/_cli/main.py,sha256=_uJ6KOiV19kATA0CYUtJZ1gGX9IGcX_8pjQyZ9J8wSU,659 devchat/_cli/prompt.py,sha256=uvug9x7zclL0P1xbT_xjFsSCx2PjjLtmGUf0O8Sx1Ek,3923 -devchat/_cli/run.py,sha256=52eABhjkUuNgd44CaGyoHRfgAdO9tr6MZBSFikqts_g,4129 
+devchat/_cli/run.py,sha256=nCCqNV7IuTxp7XrroHShR772qKWDXGmx8vM388QjPW8,4849 devchat/_cli/topic.py,sha256=CLE8y2Vox_5igtoSfsnFgaCa7YtJE-rcDtoNhnnedyQ,1455 -devchat/_cli/utils.py,sha256=MQUpQ4JYB9nhlgKwBLUNnpsMZfGiJ2xLST2RgZUKWOU,4993 +devchat/_cli/utils.py,sha256=u43D4lqihdil1BEenaryzP-NUp5CQo4jTmtq640gTLY,5975 devchat/anthropic/__init__.py,sha256=xaFR1uXxn0sVHBhCJdJKuWKVVgPnSLw3mlaCFFivD_8,97 devchat/anthropic/__pycache__/__init__.cpython-39.pyc,, devchat/anthropic/__pycache__/anthropic_chat.cpython-39.pyc,, devchat/anthropic/anthropic_chat.py,sha256=OujoXOQywPQf4gjLhdZBYTwKoRDs8hujktss3hN-BNk,423 -devchat/assistant.py,sha256=VOpfvX6Tbq57EYqoOo2V-HJ91D02-s2URc2ZdM7wRlM,6077 +devchat/assistant.py,sha256=qOU8u0nrRbruTmH0FS7Ax2H8aOws5uLOnVC8v-WzFoU,6033 devchat/chat.py,sha256=TEO8OndmL4hpJ1D-QAFKO-JB_7w1kTeUC3VVwL9FSUQ,1676 devchat/config.py,sha256=3lvhi-YRbCOM2Ye28GJF14n10mEYczD3sllhz_ZwZS8,6348 devchat/engine/__init__.py,sha256=sXaM_4kQtG-VV7NxMDj7a7v4rbNg7dJHEMF8BOz9NtI,262 @@ -53,9 +53,9 @@ devchat/openai/__pycache__/__init__.cpython-39.pyc,, devchat/openai/__pycache__/openai_chat.cpython-39.pyc,, devchat/openai/__pycache__/openai_message.cpython-39.pyc,, devchat/openai/__pycache__/openai_prompt.cpython-39.pyc,, -devchat/openai/openai_chat.py,sha256=ELEa31w-138cyZ08eHqG9FGtwNzsBCy1UfSXhtdnOT0,3684 +devchat/openai/openai_chat.py,sha256=aME5qfzvZsnoUKJ344uaUJ27okTk9if46nF3T9DeMK0,3826 devchat/openai/openai_message.py,sha256=xTmglsj5Iyvcytn3pUYhwkuiyJSx932N88fS4OCJ7Qk,3293 -devchat/openai/openai_prompt.py,sha256=Jikny5lkjt_u3LaKxlzXgUdR8ttZrUMzIVghr63VlYk,10848 +devchat/openai/openai_prompt.py,sha256=M9NIBP5W9DwzFJyMO0L9DYM470383wQOZJwZv4KvQfs,10856 devchat/prompt.py,sha256=WAHa6LmVU1xvBp6AGalQ1TzQuVwt1lsdBd70OazVoW0,9523 -devchat/store.py,sha256=pdZ3TjzNcAyPcvyD9Roc12SY5znYMh3waM0z9yF9o2w,9845 -devchat/utils.py,sha256=rUQc-iy7d7DL8xBNj_1C1W1cag9jD8xeaqdnDaFnZ9U,7509 +devchat/store.py,sha256=PI2HvMyZmIV1XyyjIr5rPayagBQWJUWsEdpUCBZ7xLU,9879 +devchat/utils.py,sha256=_-FUAC-4ZKoF0q7eg6xWz6hrj0rKJKLHenK4S_uZvkE,7643 diff --git a/site-packages/click-8.1.7.dist-info/REQUESTED b/site-packages/devchat-0.2.10.dist-info/REQUESTED similarity index 100% rename from site-packages/click-8.1.7.dist-info/REQUESTED rename to site-packages/devchat-0.2.10.dist-info/REQUESTED diff --git a/site-packages/devchat-0.2.9.dist-info/WHEEL b/site-packages/devchat-0.2.10.dist-info/WHEEL similarity index 100% rename from site-packages/devchat-0.2.9.dist-info/WHEEL rename to site-packages/devchat-0.2.10.dist-info/WHEEL diff --git a/site-packages/devchat-0.2.9.dist-info/direct_url.json b/site-packages/devchat-0.2.10.dist-info/direct_url.json similarity index 100% rename from site-packages/devchat-0.2.9.dist-info/direct_url.json rename to site-packages/devchat-0.2.10.dist-info/direct_url.json diff --git a/site-packages/devchat-0.2.9.dist-info/entry_points.txt b/site-packages/devchat-0.2.10.dist-info/entry_points.txt similarity index 100% rename from site-packages/devchat-0.2.9.dist-info/entry_points.txt rename to site-packages/devchat-0.2.10.dist-info/entry_points.txt diff --git a/site-packages/devchat-0.2.9.dist-info/REQUESTED b/site-packages/devchat-0.2.9.dist-info/REQUESTED deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/devchat/.DS_Store b/site-packages/devchat/.DS_Store deleted file mode 100644 index fc0f020c957c48c466aaea5e812b855c70682c14..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T0#gnqEtjU 
zeyhwj0i%FX;IAn_YuADR?jVKa>-vobX)?;%?RQqITT6|m-LzNi8~;H}{4C7Iqi#67 zqS3h!X*Bo4=puE7Yt zwtLW-ZM)N>y*9f0o!QK>*SB_#&w|JFiHjGdAH(fN+N#DmyrT0-=h+|fRPcL@(R>F& z;E=%s3_-vE61ZCo!ru@pQExzu)bEiphj=ka7bunJHjye3j`2%}ke0Fy`SmFuF3L~7 zK&eD`zBH<+7m>YEO__*Mz$oy~6`=jW#YX69%oWP513P&IK(sI{4Sl*xP>rk6)0iv7 z3>s6Vh^mz7D+W{L7^h452oMD^bLi{x8w6&IUSg%(DX(Dqd-xCx*E3V z{6GEq{$DIIBcp&(V4)OXR?qKsk&-@JE9B^`b+O%IBcpk_LRo^HK8~eBNAV^$X_#}# XLG(1{3NeCa{s>4JOlK7MRRul(p&<(a diff --git a/site-packages/devchat/_cli/run.py b/site-packages/devchat/_cli/run.py index f01a307..d9a78ad 100644 --- a/site-packages/devchat/_cli/run.py +++ b/site-packages/devchat/_cli/run.py @@ -4,7 +4,10 @@ import shutil import sys from typing import List import rich_click as click -from git import Repo, GitCommandError +try: + from git import Repo, GitCommandError +except Exception: + pass from devchat._cli.utils import init_dir, handle_errors, valid_git_repo, clone_git_repo from devchat._cli.utils import download_and_extract_workflow from devchat.engine import Namespace, CommandParser, RecursivePrompter @@ -89,8 +92,8 @@ def _clone_or_pull_git_repo(target_dir: str, repo_urls: List[str], zip_urls: Lis try: download_and_extract_workflow(url, target_dir) break - except Exception as e: - logger.exception("Failed to download and extract workflow: %s", e) + except Exception as err: + logger.exception("Failed to download and extract workflow: %s", err) return if os.path.exists(target_dir): diff --git a/site-packages/devchat/_cli/utils.py b/site-packages/devchat/_cli/utils.py index 676c5c0..ac7805d 100644 --- a/site-packages/devchat/_cli/utils.py +++ b/site-packages/devchat/_cli/utils.py @@ -3,11 +3,13 @@ import os import sys import json import shutil -import requests -import zipfile -from urllib.parse import urlparse from typing import Tuple, List, Optional, Any -from git import Repo, InvalidGitRepositoryError, GitCommandError +import zipfile +import requests +try: + from git import Repo, InvalidGitRepositoryError, GitCommandError +except Exception: + pass import rich_click as click from devchat.config import ConfigManager, OpenAIModelConfig from devchat.utils import find_root_dir, add_gitignore, setup_logger, get_logger @@ -22,20 +24,20 @@ def download_and_extract_workflow(workflow_url, target_dir): # Downaload file to temp dir os.makedirs(target_dir, exist_ok=True) zip_path = os.path.join(target_dir, 'workflow.zip') - with open(zip_path, 'wb') as f: + with open(zip_path, 'wb') as file_handle: for chunk in response.iter_content(chunk_size=8192): if chunk: - f.write(chunk) + file_handle.write(chunk) # Extract the zip file parent_dir = os.path.dirname(target_dir) with zipfile.ZipFile(zip_path, 'r') as zip_ref: zip_ref.extractall(parent_dir) - + # Delete target directory if exists if os.path.exists(target_dir): shutil.rmtree(target_dir) - + # Rename extracted directory to target directory extracted_dir = os.path.join(parent_dir, 'workflows-main') os.rename(extracted_dir, target_dir) diff --git a/site-packages/devchat/openai/openai_prompt.py b/site-packages/devchat/openai/openai_prompt.py index c074180..ab4916c 100644 --- a/site-packages/devchat/openai/openai_prompt.py +++ b/site-packages/devchat/openai/openai_prompt.py @@ -214,9 +214,9 @@ class OpenAIPrompt(Prompt): self.responses[index].stream_from_dict(delta) if 'function_call' in delta: - if 'name' in delta['function_call']: + if 'name' in delta['function_call'] and \ + self.responses[index].function_call.get('name', '') == '': self.responses[index].function_call['name'] = \ - 
self.responses[index].function_call.get('name', '') + \ delta['function_call']['name'] if 'arguments' in delta['function_call']: self.responses[index].function_call['arguments'] = \ diff --git a/site-packages/devchat/utils.py b/site-packages/devchat/utils.py index ff84a56..18c96bf 100644 --- a/site-packages/devchat/utils.py +++ b/site-packages/devchat/utils.py @@ -131,7 +131,11 @@ def get_user_info() -> Tuple[str, str]: cmd = ['git', 'config', 'user.name'] user_name = subprocess.check_output(cmd, encoding='utf-8').strip() except Exception: - user_name = getpass.getuser() + try: + user_name = getpass.getuser() + except Exception: + user_dir = os.path.expanduser("~") + user_name = user_dir.split(os.sep)[-1] try: cmd = ['git', 'config', 'user.email'] diff --git a/site-packages/distro-1.8.0.dist-info/RECORD b/site-packages/distro-1.8.0.dist-info/RECORD index a0cb2d0..d725539 100644 --- a/site-packages/distro-1.8.0.dist-info/RECORD +++ b/site-packages/distro-1.8.0.dist-info/RECORD @@ -1,4 +1,4 @@ -../../../bin/distro,sha256=dnDD1oUCxitRiZYscod_zaN_Jx9vaObsAqxc7z0ISzY,243 +../../../bin/distro,sha256=MvzdEoL8w1_5cIA9f0DnTWI5Nedce0jHhqtxkt8PE_4,243 distro-1.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 distro-1.8.0.dist-info/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 distro-1.8.0.dist-info/METADATA,sha256=NhYw94UPXb78_Z3_VtLxTJ1zQgUUKoTndg10uKJX800,6915 diff --git a/site-packages/distro/.DS_Store b/site-packages/distro/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 
zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0=3.8 -Requires-Dist: anyio<5.0,>=3.0 Requires-Dist: certifi Requires-Dist: h11<0.15,>=0.13 -Requires-Dist: sniffio==1.* +Provides-Extra: asyncio +Requires-Dist: anyio<5.0,>=4.0; extra == 'asyncio' Provides-Extra: http2 Requires-Dist: h2<5,>=3; extra == 'http2' Provides-Extra: socks Requires-Dist: socksio==1.*; extra == 'socks' +Provides-Extra: trio +Requires-Dist: trio<0.23.0,>=0.22.0; extra == 'trio' Description-Content-Type: text/markdown # HTTP Core @@ -70,16 +73,10 @@ For HTTP/1.1 only support, install with: $ pip install httpcore ``` -For HTTP/1.1 and HTTP/2 support, install with: +There are also a number of optional extras available... ```shell -$ pip install httpcore[http2] -``` - -For SOCKS proxy support, install with: - -```shell -$ pip install httpcore[socks] +$ pip install httpcore['asyncio,trio,http2,socks'] ``` # Sending requests @@ -124,12 +121,59 @@ The motivation for `httpcore` is: * To provide a reusable low-level client library, that other packages can then build on top of. * To provide a *really clear interface split* between the networking code and client logic, so that each is easier to understand and reason about in isolation. + +## Dependencies + +The `httpcore` package has the following dependencies... + +* `h11` +* `certifi` + +And the following optional extras... + +* `anyio` - Required by `pip install httpcore['asyncio']`. +* `trio` - Required by `pip install httpcore['trio']`. +* `h2` - Required by `pip install httpcore['http2']`. +* `socksio` - Required by `pip install httpcore['socks']`. + +## Versioning + +We use [SEMVER for our versioning policy](https://semver.org/). + +For changes between package versions please see our [project changelog](CHANGELOG.md). + +We recommend pinning your requirements either the most current major version, or a more specific version range: + +```python +pip install 'httpcore==1.*' +``` # Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). +## 1.0.2 (November 10th, 2023) + +- Fix `float("inf")` timeouts in `Event.wait` function. (#846) + +## 1.0.1 (November 3rd, 2023) + +- Fix pool timeout to account for the total time spent retrying. (#823) +- Raise a neater RuntimeError when the correct async deps are not installed. (#826) +- Add support for synchronous TLS-in-TLS streams. (#840) + +## 1.0.0 (October 6th, 2023) + +From version 1.0 our async support is now optional, as the package has minimal dependencies by default. + +For async support use either `pip install 'httpcore[asyncio]'` or `pip install 'httpcore[trio]'`. + +The project versioning policy is now explicitly governed by SEMVER. See https://semver.org/. + +- Async support becomes fully optional. (#809) +- Add support for Python 3.12. (#807) + ## 0.18.0 (September 8th, 2023) - Add support for HTTPS proxies. 
(#745, #786) diff --git a/site-packages/httpcore-0.18.0.dist-info/RECORD b/site-packages/httpcore-1.0.2.dist-info/RECORD similarity index 79% rename from site-packages/httpcore-0.18.0.dist-info/RECORD rename to site-packages/httpcore-1.0.2.dist-info/RECORD index e2215f2..124b578 100644 --- a/site-packages/httpcore-0.18.0.dist-info/RECORD +++ b/site-packages/httpcore-1.0.2.dist-info/RECORD @@ -1,9 +1,9 @@ -httpcore-0.18.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -httpcore-0.18.0.dist-info/METADATA,sha256=HYLl3BBm0kYpuqAPEqFCT0ErYv1rHgvxkY4-EuwPlEY,18914 -httpcore-0.18.0.dist-info/RECORD,, -httpcore-0.18.0.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 -httpcore-0.18.0.dist-info/licenses/LICENSE.md,sha256=_ctZFUx0y6uhahEkL3dAvqnyPW_rVUeRfYxflKgDkqU,1518 -httpcore/__init__.py,sha256=VfaTITS2e1pevgXk6fF0cZSH3f4YUt_iXqGYSsbyyY0,3338 +httpcore-1.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +httpcore-1.0.2.dist-info/METADATA,sha256=APYVcc50lK6LWlbwft616GXXpg3gRxKY1Srm-g0xEHM,20442 +httpcore-1.0.2.dist-info/RECORD,, +httpcore-1.0.2.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 +httpcore-1.0.2.dist-info/licenses/LICENSE.md,sha256=_ctZFUx0y6uhahEkL3dAvqnyPW_rVUeRfYxflKgDkqU,1518 +httpcore/__init__.py,sha256=_nBsHMRwZbRGG3ci9bZPcyBwL5z3_SM1EEEafTw37EA,3337 httpcore/__pycache__/__init__.cpython-39.pyc,, httpcore/__pycache__/_api.cpython-39.pyc,, httpcore/__pycache__/_exceptions.cpython-39.pyc,, @@ -23,7 +23,7 @@ httpcore/_async/__pycache__/http_proxy.cpython-39.pyc,, httpcore/_async/__pycache__/interfaces.cpython-39.pyc,, httpcore/_async/__pycache__/socks_proxy.cpython-39.pyc,, httpcore/_async/connection.py,sha256=klHOqiHVo4TUC_X9IsKEnfMHKQcH-WajYAkszBuLXOA,8619 -httpcore/_async/connection_pool.py,sha256=hj1viqcWZivNmoRu-QZjyuOvAFx3-Ae2rMpuK6OZhEM,14305 +httpcore/_async/connection_pool.py,sha256=k9pkv4w_tntmxzDtkHuM54nJmHT3aabxf_7ou5b-9L4,14767 httpcore/_async/http11.py,sha256=t7I91I77Zh06UVopMfqvrGskFdpwnQLo5kt04qTCI7U,12441 httpcore/_async/http2.py,sha256=KXwWZxZ-43vxIWzr1aTLErhaCodDzFr-XAvzc4fUb10,23879 httpcore/_async/http_proxy.py,sha256=hl4t-PahlAuCGtKNYRx4LSgjx1ZuspE9oDBaL6BOess,14851 @@ -38,10 +38,10 @@ httpcore/_backends/__pycache__/mock.cpython-39.pyc,, httpcore/_backends/__pycache__/sync.cpython-39.pyc,, httpcore/_backends/__pycache__/trio.cpython-39.pyc,, httpcore/_backends/anyio.py,sha256=mU8gtunBSLxESGkU0Iy1ZMgumDlAeMkwBjFE3kZiCnc,5208 -httpcore/_backends/auto.py,sha256=8r0ipGxSwXoCb_xKQAyRwL1UzfXVbO4Ee2y8vYQv3Ic,1654 +httpcore/_backends/auto.py,sha256=Q_iQjNuwJseqBxeYJYtiaGzFs08_LGI3K_egYrixEqE,1683 httpcore/_backends/base.py,sha256=Qsb8b_PSiVP1ldHHGXHxQzJ1Qlzj2r8KR9KQeANkSbE,3218 httpcore/_backends/mock.py,sha256=S4IADhC6kE22ge_jR_WHlEUkD6QAsXnwz26DSWZLcG4,4179 -httpcore/_backends/sync.py,sha256=z4emZ__8qOAWewBtpkkl3gkpR210RN1l3J8Nud0kZc8,8347 +httpcore/_backends/sync.py,sha256=LAomvc-MAlot5-S9CCFxnr561aDp9yhyfs_65WeCkZ4,8086 httpcore/_backends/trio.py,sha256=INOeHEkA8pO6AsSqjColWcayM0FQSyGi1hpaQghjrCs,6078 httpcore/_exceptions.py,sha256=7zb3KNiG0qmfUNIdFgdaUSbn2Pu3oztghi6Vg7i-LJU,1185 httpcore/_models.py,sha256=GTqsbLHxsd_lx0cvtgUBf7OltodKHjIrNKs-DbSc67k,16370 @@ -56,13 +56,13 @@ httpcore/_sync/__pycache__/http_proxy.cpython-39.pyc,, httpcore/_sync/__pycache__/interfaces.cpython-39.pyc,, httpcore/_sync/__pycache__/socks_proxy.cpython-39.pyc,, httpcore/_sync/connection.py,sha256=luocU3Tv3jlvOCaro33xrHZAwBkpn991LWdL7BC8Bkg,8408 
-httpcore/_sync/connection_pool.py,sha256=1iwYLdiq3pi9LBvpMZ8O8gWdb56qqPlm6rp35zeORBQ,13928 +httpcore/_sync/connection_pool.py,sha256=JUt-Rpg2l_17JbckVMk2TCs-EbBeUf1swH3EYC5AZVU,14384 httpcore/_sync/http11.py,sha256=1VRRlpqQgIKjxt9xQAeUbDH1Mq3280pC1AU_gwu19VQ,12102 httpcore/_sync/http2.py,sha256=lkpHesGkrwzIA4oHLyClJf5IAwRLcaAFMnmffAahAK4,23343 httpcore/_sync/http_proxy.py,sha256=82oin8vjt2a7YmmVvz7sXEZSBuajK-mHDF-EwnR_pJ0,14613 httpcore/_sync/interfaces.py,sha256=EM4PTf-rgkclzisFcrTyx1G8FwraoffE8rbckOznX_o,4365 httpcore/_sync/socks_proxy.py,sha256=T13QSceeEAg1PM9Yh7Nk-DoqI28TIUqDS-9O3OSC9Uc,13707 -httpcore/_synchronization.py,sha256=_d_vHqylvzm1Jh58_0G7i-1VwCg3Gu39Cgd4nWASvP0,8751 +httpcore/_synchronization.py,sha256=ABAtHNAESvZqu43FP_lTKpuJ84bavUbAZoN2q8E0W3o,8054 httpcore/_trace.py,sha256=akf5PsWVq3rZjqmXniomU59OY37K7JHoeNDCQ4GU84E,3954 httpcore/_utils.py,sha256=9QPh5ib4JilWX4dBCC_XO6wdBY4b0kbUGgfV3QfBANc,1525 httpcore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/site-packages/httpcore-0.18.0.dist-info/WHEEL b/site-packages/httpcore-1.0.2.dist-info/WHEEL similarity index 100% rename from site-packages/httpcore-0.18.0.dist-info/WHEEL rename to site-packages/httpcore-1.0.2.dist-info/WHEEL diff --git a/site-packages/httpcore-0.18.0.dist-info/licenses/LICENSE.md b/site-packages/httpcore-1.0.2.dist-info/licenses/LICENSE.md similarity index 100% rename from site-packages/httpcore-0.18.0.dist-info/licenses/LICENSE.md rename to site-packages/httpcore-1.0.2.dist-info/licenses/LICENSE.md diff --git a/site-packages/httpcore/.DS_Store b/site-packages/httpcore/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 "AsyncConnectionPool": + # Acquiring the pool lock here ensures that we have the + # correct dependencies installed as early as possible. 
+ async with self._pool_lock: + pass return self async def __aexit__( diff --git a/site-packages/httpcore/_backends/.DS_Store b/site-packages/httpcore/_backends/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 None: if not (hasattr(self, "_backend")): - backend = sniffio.current_async_library() + backend = current_async_library() if backend == "trio": from .trio import TrioBackend diff --git a/site-packages/httpcore/_backends/sync.py b/site-packages/httpcore/_backends/sync.py index f2dbd32..7b7b417 100644 --- a/site-packages/httpcore/_backends/sync.py +++ b/site-packages/httpcore/_backends/sync.py @@ -145,12 +145,6 @@ class SyncStream(NetworkStream): server_hostname: typing.Optional[str] = None, timeout: typing.Optional[float] = None, ) -> NetworkStream: - if isinstance(self._sock, ssl.SSLSocket): # pragma: no cover - raise RuntimeError( - "Attempted to add a TLS layer on top of the existing " - "TLS stream, which is not supported by httpcore package" - ) - exc_map: ExceptionMapping = { socket.timeout: ConnectTimeout, OSError: ConnectError, diff --git a/site-packages/httpcore/_sync/.DS_Store b/site-packages/httpcore/_sync/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 "ConnectionPool": + # Acquiring the pool lock here ensures that we have the + # correct dependencies installed as early as possible. + with self._pool_lock: + pass return self def __exit__( diff --git a/site-packages/httpcore/_synchronization.py b/site-packages/httpcore/_synchronization.py index bae27c1..119d89f 100644 --- a/site-packages/httpcore/_synchronization.py +++ b/site-packages/httpcore/_synchronization.py @@ -2,8 +2,6 @@ import threading from types import TracebackType from typing import Optional, Type -import sniffio - from ._exceptions import ExceptionMapping, PoolTimeout, map_exceptions # Our async synchronization primatives use either 'anyio' or 'trio' depending @@ -20,6 +18,32 @@ except ImportError: # pragma: nocover anyio = None # type: ignore +def current_async_library() -> str: + # Determine if we're running under trio or asyncio. + # See https://sniffio.readthedocs.io/en/latest/ + try: + import sniffio + except ImportError: # pragma: nocover + environment = "asyncio" + else: + environment = sniffio.current_async_library() + + if environment not in ("asyncio", "trio"): # pragma: nocover + raise RuntimeError("Running under an unsupported async environment.") + + if environment == "asyncio" and anyio is None: # pragma: nocover + raise RuntimeError( + "Running with asyncio requires installation of 'httpcore[asyncio]'." + ) + + if environment == "trio" and trio is None: # pragma: nocover + raise RuntimeError( + "Running with trio requires installation of 'httpcore[trio]'." + ) + + return environment + + class AsyncLock: def __init__(self) -> None: self._backend = "" @@ -29,18 +53,10 @@ class AsyncLock: Detect if we're running under 'asyncio' or 'trio' and create a lock with the correct implementation. 
""" - self._backend = sniffio.current_async_library() + self._backend = current_async_library() if self._backend == "trio": - if trio is None: # pragma: nocover - raise RuntimeError( - "Running under trio, requires the 'trio' package to be installed." - ) self._trio_lock = trio.Lock() - else: - if anyio is None: # pragma: nocover - raise RuntimeError( - "Running under asyncio requires the 'anyio' package to be installed." - ) + elif self._backend == "asyncio": self._anyio_lock = anyio.Lock() async def __aenter__(self) -> "AsyncLock": @@ -49,7 +65,7 @@ class AsyncLock: if self._backend == "trio": await self._trio_lock.acquire() - else: + elif self._backend == "asyncio": await self._anyio_lock.acquire() return self @@ -62,7 +78,7 @@ class AsyncLock: ) -> None: if self._backend == "trio": self._trio_lock.release() - else: + elif self._backend == "asyncio": self._anyio_lock.release() @@ -75,18 +91,10 @@ class AsyncEvent: Detect if we're running under 'asyncio' or 'trio' and create a lock with the correct implementation. """ - self._backend = sniffio.current_async_library() + self._backend = current_async_library() if self._backend == "trio": - if trio is None: # pragma: nocover - raise RuntimeError( - "Running under trio requires the 'trio' package to be installed." - ) self._trio_event = trio.Event() - else: - if anyio is None: # pragma: nocover - raise RuntimeError( - "Running under asyncio requires the 'anyio' package to be installed." - ) + elif self._backend == "asyncio": self._anyio_event = anyio.Event() def set(self) -> None: @@ -95,7 +103,7 @@ class AsyncEvent: if self._backend == "trio": self._trio_event.set() - else: + elif self._backend == "asyncio": self._anyio_event.set() async def wait(self, timeout: Optional[float] = None) -> None: @@ -103,22 +111,12 @@ class AsyncEvent: self.setup() if self._backend == "trio": - if trio is None: # pragma: nocover - raise RuntimeError( - "Running under trio requires the 'trio' package to be installed." - ) - trio_exc_map: ExceptionMapping = {trio.TooSlowError: PoolTimeout} timeout_or_inf = float("inf") if timeout is None else timeout with map_exceptions(trio_exc_map): with trio.fail_after(timeout_or_inf): await self._trio_event.wait() - else: - if anyio is None: # pragma: nocover - raise RuntimeError( - "Running under asyncio requires the 'anyio' package to be installed." - ) - + elif self._backend == "asyncio": anyio_exc_map: ExceptionMapping = {TimeoutError: PoolTimeout} with map_exceptions(anyio_exc_map): with anyio.fail_after(timeout): @@ -135,22 +133,12 @@ class AsyncSemaphore: Detect if we're running under 'asyncio' or 'trio' and create a semaphore with the correct implementation. """ - self._backend = sniffio.current_async_library() + self._backend = current_async_library() if self._backend == "trio": - if trio is None: # pragma: nocover - raise RuntimeError( - "Running under trio requires the 'trio' package to be installed." - ) - self._trio_semaphore = trio.Semaphore( initial_value=self._bound, max_value=self._bound ) - else: - if anyio is None: # pragma: nocover - raise RuntimeError( - "Running under asyncio requires the 'anyio' package to be installed." 
- ) - + elif self._backend == "asyncio": self._anyio_semaphore = anyio.Semaphore( initial_value=self._bound, max_value=self._bound ) @@ -161,13 +149,13 @@ class AsyncSemaphore: if self._backend == "trio": await self._trio_semaphore.acquire() - else: + elif self._backend == "asyncio": await self._anyio_semaphore.acquire() async def release(self) -> None: if self._backend == "trio": self._trio_semaphore.release() - else: + elif self._backend == "asyncio": self._anyio_semaphore.release() @@ -184,27 +172,17 @@ class AsyncShieldCancellation: Detect if we're running under 'asyncio' or 'trio' and create a shielded scope with the correct implementation. """ - self._backend = sniffio.current_async_library() + self._backend = current_async_library() if self._backend == "trio": - if trio is None: # pragma: nocover - raise RuntimeError( - "Running under trio requires the 'trio' package to be installed." - ) - self._trio_shield = trio.CancelScope(shield=True) - else: - if anyio is None: # pragma: nocover - raise RuntimeError( - "Running under asyncio requires the 'anyio' package to be installed." - ) - + elif self._backend == "asyncio": self._anyio_shield = anyio.CancelScope(shield=True) def __enter__(self) -> "AsyncShieldCancellation": if self._backend == "trio": self._trio_shield.__enter__() - else: + elif self._backend == "asyncio": self._anyio_shield.__enter__() return self @@ -216,7 +194,7 @@ class AsyncShieldCancellation: ) -> None: if self._backend == "trio": self._trio_shield.__exit__(exc_type, exc_value, traceback) - else: + elif self._backend == "asyncio": self._anyio_shield.__exit__(exc_type, exc_value, traceback) @@ -248,6 +226,8 @@ class Event: self._event.set() def wait(self, timeout: Optional[float] = None) -> None: + if timeout == float("inf"): # pragma: no cover + timeout = None if not self._event.wait(timeout=timeout): raise PoolTimeout() # pragma: nocover diff --git a/site-packages/importlib_resources-6.1.0.dist-info/INSTALLER b/site-packages/httpx-0.25.1.dist-info/INSTALLER similarity index 100% rename from site-packages/importlib_resources-6.1.0.dist-info/INSTALLER rename to site-packages/httpx-0.25.1.dist-info/INSTALLER diff --git a/site-packages/httpx-0.25.0.dist-info/METADATA b/site-packages/httpx-0.25.1.dist-info/METADATA similarity index 88% rename from site-packages/httpx-0.25.0.dist-info/METADATA rename to site-packages/httpx-0.25.1.dist-info/METADATA index f3a6d50..e4abd66 100644 --- a/site-packages/httpx-0.25.0.dist-info/METADATA +++ b/site-packages/httpx-0.25.1.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: httpx -Version: 0.25.0 +Version: 0.25.1 Summary: The next generation HTTP client. Project-URL: Changelog, https://github.com/encode/httpx/blob/master/CHANGELOG.md Project-URL: Documentation, https://www.python-httpx.org @@ -22,10 +22,12 @@ Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 Classifier: Topic :: Internet :: WWW/HTTP Requires-Python: >=3.8 +Requires-Dist: anyio Requires-Dist: certifi -Requires-Dist: httpcore<0.19.0,>=0.18.0 +Requires-Dist: httpcore Requires-Dist: idna Requires-Dist: sniffio Provides-Extra: brotli @@ -192,23 +194,14 @@ inspiration around the lower-level networking details. ## Release Information -### Removed +### 0.25.1 (3rd November, 2023) -* Drop support for Python 3.7. (#2813) - -### Added - -* Support HTTPS proxies. 
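For context on the `Event.wait()` change in the `_synchronization.py` hunk above (and the matching 1.0.2 changelog entry): `threading`'s primitives reject timeouts beyond `threading.TIMEOUT_MAX`, so an infinite timeout has to be translated to `None`. A minimal sketch of the behaviour being worked around:

```python
# Minimal sketch of the problem the `if timeout == float("inf")` guard avoids.
import threading

event = threading.Event()

event.wait(timeout=0.01)               # finite timeouts (or None) are fine
try:
    event.wait(timeout=float("inf"))   # rejected by the underlying lock
except OverflowError as exc:
    print(exc)                         # exact message varies by Python version
```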
(#2845) -* Change the type of `Extensions` from `Mapping[Str, Any]` to `MutableMapping[Str, Any]`. (#2803) -* Add `socket_options` argument to `httpx.HTTPTransport` and `httpx.AsyncHTTPTransport` classes. (#2716) -* The `Response.raise_for_status()` method now returns the response instance. For example: `data = httpx.get('...').raise_for_status().json()`. (#2776) +* Add support for Python 3.12. (#2854) +* Add support for httpcore 1.0 (#2885) ### Fixed -* Return `500` error response instead of exceptions when `raise_app_exceptions=False` is set on `ASGITransport`. (#2669) -* Ensure all `WSGITransport` environs have a `SERVER_PROTOCOL`. (#2708) -* Always encode forward slashes as `%2F` in query parameters (#2723) -* Use Mozilla documentation instead of `httpstatuses.com` for HTTP error reference (#2768) +* Raise `ValueError` on `Response.encoding` being set after `Response.text` has been accessed. (#2852) --- diff --git a/site-packages/httpx-0.25.0.dist-info/RECORD b/site-packages/httpx-0.25.1.dist-info/RECORD similarity index 67% rename from site-packages/httpx-0.25.0.dist-info/RECORD rename to site-packages/httpx-0.25.1.dist-info/RECORD index 7d9f9f1..282918b 100644 --- a/site-packages/httpx-0.25.0.dist-info/RECORD +++ b/site-packages/httpx-0.25.1.dist-info/RECORD @@ -1,10 +1,10 @@ -../../../bin/httpx,sha256=KIP6KGLZF0RAHfVjqe3qTPD8GRYex0qWFBxARqKqu94,235 -httpx-0.25.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -httpx-0.25.0.dist-info/METADATA,sha256=8ZnffZ9BAo4gzHpfUFlvx2tQYM3-Z3FRwOh4BsgKgLk,7630 -httpx-0.25.0.dist-info/RECORD,, -httpx-0.25.0.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 -httpx-0.25.0.dist-info/entry_points.txt,sha256=2lVkdQmxLA1pNMgSN2eV89o90HCZezhmNwsy6ryKDSA,37 -httpx-0.25.0.dist-info/licenses/LICENSE.md,sha256=TsWdVE8StfU5o6cW_TIaxYzNgDC0ZSIfLIgCAM3yjY0,1508 +../../../bin/httpx,sha256=Ug7WqeS1tnOXHYhEuY-5dLJW9cLyfsYkhpAZ_-JlYc0,235 +httpx-0.25.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +httpx-0.25.1.dist-info/METADATA,sha256=F-1gg84Cs1LReAfx4NmiNXp-qE9DqJKD3pHD1B64y5A,7095 +httpx-0.25.1.dist-info/RECORD,, +httpx-0.25.1.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 +httpx-0.25.1.dist-info/entry_points.txt,sha256=2lVkdQmxLA1pNMgSN2eV89o90HCZezhmNwsy6ryKDSA,37 +httpx-0.25.1.dist-info/licenses/LICENSE.md,sha256=TsWdVE8StfU5o6cW_TIaxYzNgDC0ZSIfLIgCAM3yjY0,1508 httpx/__init__.py,sha256=oCxVAsePEy5DE9eLhGAAq9H3RBGZUDaUROtGEyzbBRo,3210 httpx/__pycache__/__init__.cpython-39.pyc,, httpx/__pycache__/__version__.cpython-39.pyc,, @@ -24,18 +24,18 @@ httpx/__pycache__/_types.cpython-39.pyc,, httpx/__pycache__/_urlparse.cpython-39.pyc,, httpx/__pycache__/_urls.cpython-39.pyc,, httpx/__pycache__/_utils.cpython-39.pyc,, -httpx/__version__.py,sha256=hVLEi3Fe6ziNKrXxfBIkYicKi_9NvZZvMQJMGUkSjIU,108 +httpx/__version__.py,sha256=Qdj8HFntcoM-E84xA8Y05EMH5ocBUPDZX722C_2Tvi0,108 httpx/_api.py,sha256=cVU9ErzaXve5rqoPoSHr9yJbovHtICrcxR7yBoNSeOw,13011 -httpx/_auth.py,sha256=58FA-xqqp-XgLZ7Emd4-et-XXuTRaa5buiBYB2MzyvE,11773 +httpx/_auth.py,sha256=4ZLi3wfSttjqxOQFxys7hZ55b-8xq3ZKypa-sEz3d-Q,12013 httpx/_client.py,sha256=A9MPP_d1ZlqcO5CeGLgyzVwdHgCpROYSdjoAUA6rpYE,68131 -httpx/_compat.py,sha256=S4sL2QocE1gj0qn6F7egl7dYhY809rXE5P9YD3e_zDg,1602 -httpx/_config.py,sha256=3AKbxOVuh2UeZi95UqALtoO1thrmHUhTxvcFAyspKPw,12491 +httpx/_compat.py,sha256=rJERfjHkRvvHFVfltbHyCVcAboNsfEeN6j_00Z2C4k8,1563 +httpx/_config.py,sha256=_NFrJwZr0yFysMaTt3rCnJqHvV5vQQG3yUg1kxSomjs,12334 
httpx/_content.py,sha256=olbWqawdWWweXeW6gDYHPiEGjip5lqFZKv9OmVd-zIg,8092 httpx/_decoders.py,sha256=dd8GSkEAe45BzRUF47zH_lg3-BcwXtxzPBSGP5Y4F90,9739 httpx/_exceptions.py,sha256=xKw-U6vW7zmdReUAGYHMegYWZuDAuE5039L087SHe4Q,7880 httpx/_main.py,sha256=m9C4RuqjOB6UqL3FFHMjmC45f4SDSO-iOREFLdw4IdM,15784 -httpx/_models.py,sha256=gfrZvx3B0R2U7bLT7JfNKt4DMnUoT8leHOiij6Te13A,42791 -httpx/_multipart.py,sha256=qzt35jAgapaRPwdq-lTKSA5YY6ayrfDIsZLdr3t4NWc,8972 +httpx/_models.py,sha256=6lIr7avUeJo2qp0vN4HSOgI96TPzbq1HtGSHNRwO-gg,42896 +httpx/_multipart.py,sha256=yTaczu2EcFX5GcOmDW8_2x2w2d1j4_8qFcsUCyYLSUI,8960 httpx/_status_codes.py,sha256=XKArMrSoo8oKBQCHdFGA-wsM2PcSTaHE8svDYOUcwWk,5584 httpx/_transports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 httpx/_transports/__pycache__/__init__.cpython-39.pyc,, @@ -46,11 +46,11 @@ httpx/_transports/__pycache__/mock.cpython-39.pyc,, httpx/_transports/__pycache__/wsgi.cpython-39.pyc,, httpx/_transports/asgi.py,sha256=ZoIHy1-Wu09vZRkjzVzXJFgw9sh0Dq5cG8zfgtqK-SA,5469 httpx/_transports/base.py,sha256=0BM8yZZEkdFT4tXXSm0h0dK0cSYA4hLgInj_BljGEGw,2510 -httpx/_transports/default.py,sha256=u99Nctd3mDNHfOc-S_ldaEwSvlp8NGLwIW2rjY0VgP4,13192 +httpx/_transports/default.py,sha256=Kn-RztYMwpHT3rBna3UVYE39W67ipEXnUr1FJTc0-9s,13175 httpx/_transports/mock.py,sha256=sDt3BDXbz8-W94kC8OXtGzF1PWH0y73h1De7Q-XkVtg,1179 httpx/_transports/wsgi.py,sha256=Zt3EhTagyF3o-HC2oPMp-hTy3M3kQThL1ECJRc8eXEM,4797 httpx/_types.py,sha256=W_lOq_3FnHmZGQuXaGm5JDykFoC0WoqhnfH92nRDNGQ,3367 -httpx/_urlparse.py,sha256=UQbI0l39smQh5UplxFAtLYfuxSx1cC_JPivhBPBSWgk,16774 +httpx/_urlparse.py,sha256=Vpz_ydrcGCy5ReNqrU9lhuqsZJLER3hE-Wq5crYCb48,16777 httpx/_urls.py,sha256=JAONd-2reXpB_WuQ7WuvhUcLuebiQeYJQPyszADmCow,21840 -httpx/_utils.py,sha256=I_m2rFyEpoU2j8lS0GwdWcUTBTQ8cjvFnZ8ROmnCpR8,15403 +httpx/_utils.py,sha256=ugUzRBmO4i_wDXSmyaGhOiQ6w4WAXCopG-uxrdn16dk,14109 httpx/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/site-packages/httpx-0.25.0.dist-info/WHEEL b/site-packages/httpx-0.25.1.dist-info/WHEEL similarity index 100% rename from site-packages/httpx-0.25.0.dist-info/WHEEL rename to site-packages/httpx-0.25.1.dist-info/WHEEL diff --git a/site-packages/httpx-0.25.0.dist-info/entry_points.txt b/site-packages/httpx-0.25.1.dist-info/entry_points.txt similarity index 100% rename from site-packages/httpx-0.25.0.dist-info/entry_points.txt rename to site-packages/httpx-0.25.1.dist-info/entry_points.txt diff --git a/site-packages/httpx-0.25.0.dist-info/licenses/LICENSE.md b/site-packages/httpx-0.25.1.dist-info/licenses/LICENSE.md similarity index 100% rename from site-packages/httpx-0.25.0.dist-info/licenses/LICENSE.md rename to site-packages/httpx-0.25.1.dist-info/licenses/LICENSE.md diff --git a/site-packages/httpx/.DS_Store b/site-packages/httpx/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 typing.Generator[Request, Response, None]: @@ -217,6 +220,8 @@ class DigestAuth(Auth): request.headers["Authorization"] = self._build_auth_header( request, self._last_challenge ) + if response.cookies: + Cookies(response.cookies).set_cookie_header(request=request) yield request def _parse_challenge( diff --git a/site-packages/httpx/_compat.py b/site-packages/httpx/_compat.py index a271c6b..493e621 
100644 --- a/site-packages/httpx/_compat.py +++ b/site-packages/httpx/_compat.py @@ -16,9 +16,7 @@ except ImportError: # pragma: no cover except ImportError: brotli = None -if sys.version_info >= (3, 10) or ( - sys.version_info >= (3, 8) and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0, 7) -): +if sys.version_info >= (3, 10) or ssl.OPENSSL_VERSION_INFO >= (1, 1, 0, 7): def set_minimum_tls_version_1_2(context: ssl.SSLContext) -> None: # The OP_NO_SSL* and OP_NO_TLS* become deprecated in favor of diff --git a/site-packages/httpx/_config.py b/site-packages/httpx/_config.py index 39d81a2..45ed29e 100644 --- a/site-packages/httpx/_config.py +++ b/site-packages/httpx/_config.py @@ -1,7 +1,6 @@ import logging import os import ssl -import sys import typing from pathlib import Path @@ -128,11 +127,10 @@ class SSLConfig: # Signal to server support for PHA in TLS 1.3. Raises an # AttributeError if only read-only access is implemented. - if sys.version_info >= (3, 8): # pragma: no cover - try: - context.post_handshake_auth = True - except AttributeError: # pragma: no cover - pass + try: + context.post_handshake_auth = True + except AttributeError: # pragma: no cover + pass # Disable using 'commonName' for SSLContext.check_hostname # when the 'subjectAltName' extension isn't available. @@ -168,10 +166,9 @@ class SSLConfig: alpn_idents = ["http/1.1", "h2"] if self.http2 else ["http/1.1"] context.set_alpn_protocols(alpn_idents) - if sys.version_info >= (3, 8): # pragma: no cover - keylogfile = os.environ.get("SSLKEYLOGFILE") - if keylogfile and self.trust_env: - context.keylog_filename = keylogfile + keylogfile = os.environ.get("SSLKEYLOGFILE") + if keylogfile and self.trust_env: + context.keylog_filename = keylogfile return context diff --git a/site-packages/httpx/_models.py b/site-packages/httpx/_models.py index e1e45cf..4e4162d 100644 --- a/site-packages/httpx/_models.py +++ b/site-packages/httpx/_models.py @@ -43,7 +43,6 @@ from ._types import ( ) from ._urls import URL from ._utils import ( - guess_json_utf, is_known_encoding, normalize_header_key, normalize_header_value, @@ -603,6 +602,16 @@ class Response: @encoding.setter def encoding(self, value: str) -> None: + """ + Set the encoding to use for decoding the byte content into text. + + If the `text` attribute has been accessed, attempting to set the + encoding will throw a ValueError. + """ + if hasattr(self, "_text"): + raise ValueError( + "Setting encoding after `text` has been accessed is not allowed." 
+ ) self._encoding = value @property @@ -749,11 +758,7 @@ class Response: raise HTTPStatusError(message, request=request, response=self) def json(self, **kwargs: typing.Any) -> typing.Any: - if self.charset_encoding is None and self.content and len(self.content) > 3: - encoding = guess_json_utf(self.content) - if encoding is not None: - return jsonlib.loads(self.content.decode(encoding), **kwargs) - return jsonlib.loads(self.text, **kwargs) + return jsonlib.loads(self.content, **kwargs) @property def cookies(self) -> "Cookies": diff --git a/site-packages/httpx/_multipart.py b/site-packages/httpx/_multipart.py index 446f4ad..6d5baa8 100644 --- a/site-packages/httpx/_multipart.py +++ b/site-packages/httpx/_multipart.py @@ -1,4 +1,3 @@ -import binascii import io import os import typing @@ -200,7 +199,7 @@ class MultipartStream(SyncByteStream, AsyncByteStream): boundary: typing.Optional[bytes] = None, ) -> None: if boundary is None: - boundary = binascii.hexlify(os.urandom(16)) + boundary = os.urandom(16).hex().encode("ascii") self.boundary = boundary self.content_type = "multipart/form-data; boundary=%s" % boundary.decode( diff --git a/site-packages/httpx/_transports/.DS_Store b/site-packages/httpx/_transports/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 typing.Iterator[None]: try: yield - except Exception as exc: # noqa: PIE-786 + except Exception as exc: mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): diff --git a/site-packages/httpx/_urlparse.py b/site-packages/httpx/_urlparse.py index e1ba8dc..8e06042 100644 --- a/site-packages/httpx/_urlparse.py +++ b/site-packages/httpx/_urlparse.py @@ -87,7 +87,7 @@ COMPONENT_REGEX = { # We use these simple regexs as a first pass before handing off to # the stdlib 'ipaddress' module for IP address validation. -IPv4_STYLE_HOSTNAME = re.compile(r"^[0-9]+.[0-9]+.[0-9]+.[0-9]+$") +IPv4_STYLE_HOSTNAME = re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$") IPv6_STYLE_HOSTNAME = re.compile(r"^\[.*\]$") diff --git a/site-packages/httpx/_utils.py b/site-packages/httpx/_utils.py index 1775b1a..ba5807c 100644 --- a/site-packages/httpx/_utils.py +++ b/site-packages/httpx/_utils.py @@ -91,41 +91,6 @@ def format_form_param(name: str, value: str) -> bytes: return f'{name}="{value}"'.encode() -# Null bytes; no need to recreate these on each call to guess_json_utf -_null = b"\x00" -_null2 = _null * 2 -_null3 = _null * 3 - - -def guess_json_utf(data: bytes) -> typing.Optional[str]: - # JSON always starts with two ASCII characters, so detection is as - # easy as counting the nulls and from their location and count - # determine the encoding. Also detect a BOM, if present. 
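The removal of `guess_json_utf()` here pairs with the `Response.json()` change above: `json.loads()` already auto-detects UTF-8/16/32 when given bytes, so httpx can hand it the raw content directly. Together with the new guard on the `Response.encoding` setter, a minimal sketch of the resulting behaviour (httpx >= 0.25.1 assumed):

```python
# Minimal sketch of the two behaviour changes, assuming httpx >= 0.25.1.
import httpx

response = httpx.Response(200, content='{"message": "hello"}'.encode("utf-16"))

# json() now passes the raw bytes to json.loads(), which detects UTF-8/16/32
# for bytes input on its own, so the guess_json_utf() helper is unnecessary.
assert response.json() == {"message": "hello"}

_ = response.text                  # accessing .text caches the decoded body
try:
    response.encoding = "latin-1"  # ...so changing the encoding now raises
except ValueError as exc:
    print(exc)
```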
- sample = data[:4] - if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return "utf-32" # BOM included - if sample[:3] == codecs.BOM_UTF8: - return "utf-8-sig" # BOM included, MS style (discouraged) - if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return "utf-16" # BOM included - nullcount = sample.count(_null) - if nullcount == 0: - return "utf-8" - if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return "utf-16-be" - if sample[1::2] == _null2: # 2nd and 4th are null - return "utf-16-le" - # Did not detect 2 valid UTF-16 ascii-range characters - if nullcount == 3: - if sample[:3] == _null3: - return "utf-32-be" - if sample[1:] == _null3: - return "utf-32-le" - # Did not detect a valid UTF-32 ascii-range character - return None - - def get_ca_bundle_from_env() -> typing.Optional[str]: if "SSL_CERT_FILE" in os.environ: ssl_file = Path(os.environ["SSL_CERT_FILE"]) diff --git a/site-packages/idna/.DS_Store b/site-packages/idna/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0=3.8 -License-File: LICENSE -Requires-Dist: zipp >=3.1.0 ; python_version < "3.10" -Provides-Extra: docs -Requires-Dist: sphinx >=3.5 ; extra == 'docs' -Requires-Dist: sphinx <7.2.5 ; extra == 'docs' -Requires-Dist: jaraco.packaging >=9.3 ; extra == 'docs' -Requires-Dist: rst.linker >=1.9 ; extra == 'docs' -Requires-Dist: furo ; extra == 'docs' -Requires-Dist: sphinx-lint ; extra == 'docs' -Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'docs' -Provides-Extra: testing -Requires-Dist: pytest >=6 ; extra == 'testing' -Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'testing' -Requires-Dist: pytest-cov ; extra == 'testing' -Requires-Dist: pytest-enabler >=2.2 ; extra == 'testing' -Requires-Dist: pytest-ruff ; extra == 'testing' -Requires-Dist: zipp >=3.17 ; extra == 'testing' -Requires-Dist: pytest-black >=0.3.7 ; (platform_python_implementation != "PyPy") and extra == 'testing' -Requires-Dist: pytest-mypy >=0.9.1 ; (platform_python_implementation != "PyPy") and extra == 'testing' - -.. image:: https://img.shields.io/pypi/v/importlib_resources.svg - :target: https://pypi.org/project/importlib_resources - -.. image:: https://img.shields.io/pypi/pyversions/importlib_resources.svg - -.. image:: https://github.com/python/importlib_resources/workflows/tests/badge.svg - :target: https://github.com/python/importlib_resources/actions?query=workflow%3A%22tests%22 - :alt: tests - -.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json - :target: https://github.com/astral-sh/ruff - :alt: Ruff - -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: Code style: Black - -.. image:: https://readthedocs.org/projects/importlib-resources/badge/?version=latest - :target: https://importlib-resources.readthedocs.io/en/latest/?badge=latest - -.. image:: https://img.shields.io/badge/skeleton-2023-informational - :target: https://blog.jaraco.com/skeleton - -.. 
image:: https://tidelift.com/badges/package/pypi/importlib-resources - :target: https://tidelift.com/subscription/pkg/pypi-importlib-resources?utm_source=pypi-importlib-resources&utm_medium=readme - -``importlib_resources`` is a backport of Python standard library -`importlib.resources -`_ -module for older Pythons. - -The key goal of this module is to replace parts of `pkg_resources -`_ with a -solution in Python's stdlib that relies on well-defined APIs. This makes -reading resources included in packages easier, with more stable and consistent -semantics. - -Compatibility -============= - -New features are introduced in this third-party library and later merged -into CPython. The following table indicates which versions of this library -were contributed to different versions in the standard library: - -.. list-table:: - :header-rows: 1 - - * - importlib_resources - - stdlib - * - 6.0 - - 3.13 - * - 5.12 - - 3.12 - * - 5.7 - - 3.11 - * - 5.0 - - 3.10 - * - 1.3 - - 3.9 - * - 0.5 (?) - - 3.7 - -For Enterprise -============== - -Available as part of the Tidelift Subscription. - -This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. - -`Learn more `_. diff --git a/site-packages/importlib_resources-6.1.0.dist-info/RECORD b/site-packages/importlib_resources-6.1.0.dist-info/RECORD deleted file mode 100644 index 114291f..0000000 --- a/site-packages/importlib_resources-6.1.0.dist-info/RECORD +++ /dev/null @@ -1,72 +0,0 @@ -importlib_resources-6.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -importlib_resources-6.1.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 -importlib_resources-6.1.0.dist-info/METADATA,sha256=sv8t8W7Di2p_H14Lu2B9GY_dPyup1R-xqDOCMR--jHI,4122 -importlib_resources-6.1.0.dist-info/RECORD,, -importlib_resources-6.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_resources-6.1.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 -importlib_resources-6.1.0.dist-info/top_level.txt,sha256=fHIjHU1GZwAjvcydpmUnUrTnbvdiWjG4OEVZK8by0TQ,20 -importlib_resources/__init__.py,sha256=t3v1sx-q_TzszzsOs3dqNOjqBQVbSB_KY-BjCvf65qQ,226 -importlib_resources/__pycache__/__init__.cpython-39.pyc,, -importlib_resources/__pycache__/_adapters.cpython-39.pyc,, -importlib_resources/__pycache__/_common.cpython-39.pyc,, -importlib_resources/__pycache__/_compat.cpython-39.pyc,, -importlib_resources/__pycache__/_itertools.cpython-39.pyc,, -importlib_resources/__pycache__/abc.cpython-39.pyc,, -importlib_resources/__pycache__/readers.cpython-39.pyc,, -importlib_resources/__pycache__/simple.cpython-39.pyc,, -importlib_resources/_adapters.py,sha256=vprJGbUeHbajX6XCuMP6J3lMrqCi-P_MTlziJUR7jfk,4482 -importlib_resources/_common.py,sha256=jSC4xfLdcMNbtbWHtpzbFkNa0W7kvf__nsYn14C_AEU,5457 -importlib_resources/_compat.py,sha256=6MduVulDb7YTwL5f1KAzJUYdYUs12ZAnew_nnCqq0_8,3304 -importlib_resources/_itertools.py,sha256=eDisV6RqiNZOogLSXf6LOGHOYc79FGgPrKNLzFLmCrU,1277 -importlib_resources/abc.py,sha256=Icr2IJ2QtH7vvAB9vC5WRJ9KBoaDyJa7KUs8McuROzo,5140 -importlib_resources/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_resources/readers.py,sha256=ytBi6cqzs_viuvwhEOh0rTI9b8-QLnMaxW7E_LQI1Pc,5409 -importlib_resources/simple.py,sha256=0__2TQBTQoqkajYmNPt1HxERcReAT6boVKJA328pr04,2576 -importlib_resources/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-importlib_resources/tests/__pycache__/__init__.cpython-39.pyc,, -importlib_resources/tests/__pycache__/_compat.cpython-39.pyc,, -importlib_resources/tests/__pycache__/_path.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_compatibilty_files.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_contents.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_custom.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_files.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_open.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_path.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_read.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_reader.cpython-39.pyc,, -importlib_resources/tests/__pycache__/test_resource.cpython-39.pyc,, -importlib_resources/tests/__pycache__/util.cpython-39.pyc,, -importlib_resources/tests/__pycache__/zip.cpython-39.pyc,, -importlib_resources/tests/_compat.py,sha256=YTSB0U1R9oADnh6GrQcOCgojxcF_N6H1LklymEWf9SQ,708 -importlib_resources/tests/_path.py,sha256=nkv3ek7D1U898v921rYbldDCtKri2oyYOi3EJqGjEGU,1289 -importlib_resources/tests/data01/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_resources/tests/data01/__pycache__/__init__.cpython-39.pyc,, -importlib_resources/tests/data01/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4 -importlib_resources/tests/data01/subdirectory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_resources/tests/data01/subdirectory/__pycache__/__init__.cpython-39.pyc,, -importlib_resources/tests/data01/subdirectory/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4 -importlib_resources/tests/data01/utf-16.file,sha256=t5q9qhxX0rYqItBOM8D3ylwG-RHrnOYteTLtQr6sF7g,44 -importlib_resources/tests/data01/utf-8.file,sha256=kwWgYG4yQ-ZF2X_WA66EjYPmxJRn-w8aSOiS9e8tKYY,20 -importlib_resources/tests/data02/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_resources/tests/data02/__pycache__/__init__.cpython-39.pyc,, -importlib_resources/tests/data02/one/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_resources/tests/data02/one/__pycache__/__init__.cpython-39.pyc,, -importlib_resources/tests/data02/one/resource1.txt,sha256=10flKac7c-XXFzJ3t-AB5MJjlBy__dSZvPE_dOm2q6U,13 -importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt,sha256=jnrBBztxYrtQck7cmVnc4xQVO4-agzAZDGSFkAWtlFw,10 -importlib_resources/tests/data02/two/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_resources/tests/data02/two/__pycache__/__init__.cpython-39.pyc,, -importlib_resources/tests/data02/two/resource2.txt,sha256=lt2jbN3TMn9QiFKM832X39bU_62UptDdUkoYzkvEbl0,13 -importlib_resources/tests/namespacedata01/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4 -importlib_resources/tests/namespacedata01/utf-16.file,sha256=t5q9qhxX0rYqItBOM8D3ylwG-RHrnOYteTLtQr6sF7g,44 -importlib_resources/tests/namespacedata01/utf-8.file,sha256=kwWgYG4yQ-ZF2X_WA66EjYPmxJRn-w8aSOiS9e8tKYY,20 -importlib_resources/tests/test_compatibilty_files.py,sha256=95N_R7aik8cvnE6sBJpsxmP0K5plOWRIJDgbalD-Hpw,3314 -importlib_resources/tests/test_contents.py,sha256=V1Xfk3lqTDdvUsZuV18Kndf0CT_tkM2oEIwk9Vv0rhg,968 -importlib_resources/tests/test_custom.py,sha256=jVYg9idEVdUN6idHUfDDlZ-zDWl56qYNbj5QrcZO76Y,1124 -importlib_resources/tests/test_files.py,sha256=W5XoBWSTr84Ke15UtjqWLet2iUDUyJfQxbST4PDlj2w,3283 
-importlib_resources/tests/test_open.py,sha256=9qvdC6Eu2Kn3mh3xDR5HUEQoePSKIecTxU4vnH9veO8,2671 -importlib_resources/tests/test_path.py,sha256=XR5RI7_zndI_Nqw9eHU1tDmSGIo29N1GP8INodPc584,2142 -importlib_resources/tests/test_read.py,sha256=ZEUosdzSMHxF6s7u9sWI9M-LbIJXyFtR34tpA6eGzrs,2476 -importlib_resources/tests/test_reader.py,sha256=WHlZKJBa3MOHnWQG2CB02l8LQ4QjQ0kG_KdcVqZSZ4o,4945 -importlib_resources/tests/test_resource.py,sha256=3rY9zrUKAlOwSuHmgrVw-tGPpQ9HxRDeGwcFYwbtKHc,7188 -importlib_resources/tests/util.py,sha256=ZJ9ouR8UOZRbgQ6_ZeXxXBvcjOlSVae4ckIMqzhyAZg,4784 -importlib_resources/tests/zip.py,sha256=2MKmF8-osXBJSnqcUTuAUek_-tSB3iKmIT9qPhcsOsM,783 diff --git a/site-packages/importlib_resources-6.1.0.dist-info/REQUESTED b/site-packages/importlib_resources-6.1.0.dist-info/REQUESTED deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources-6.1.0.dist-info/top_level.txt b/site-packages/importlib_resources-6.1.0.dist-info/top_level.txt deleted file mode 100644 index 58ad1bd..0000000 --- a/site-packages/importlib_resources-6.1.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -importlib_resources diff --git a/site-packages/importlib_resources/__init__.py b/site-packages/importlib_resources/__init__.py deleted file mode 100644 index e6b60c1..0000000 --- a/site-packages/importlib_resources/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Read resources contained within a package.""" - -from ._common import ( - as_file, - files, - Package, -) - -from .abc import ResourceReader - - -__all__ = [ - 'Package', - 'ResourceReader', - 'as_file', - 'files', -] diff --git a/site-packages/importlib_resources/_adapters.py b/site-packages/importlib_resources/_adapters.py deleted file mode 100644 index 50688fb..0000000 --- a/site-packages/importlib_resources/_adapters.py +++ /dev/null @@ -1,168 +0,0 @@ -from contextlib import suppress -from io import TextIOWrapper - -from . import abc - - -class SpecLoaderAdapter: - """ - Adapt a package spec to adapt the underlying loader. - """ - - def __init__(self, spec, adapter=lambda spec: spec.loader): - self.spec = spec - self.loader = adapter(spec) - - def __getattr__(self, name): - return getattr(self.spec, name) - - -class TraversableResourcesLoader: - """ - Adapt a loader to provide TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - def get_resource_reader(self, name): - return CompatibilityFiles(self.spec)._native() - - -def _io_wrapper(file, mode='r', *args, **kwargs): - if mode == 'r': - return TextIOWrapper(file, *args, **kwargs) - elif mode == 'rb': - return file - raise ValueError(f"Invalid mode value '{mode}', only 'r' and 'rb' are supported") - - -class CompatibilityFiles: - """ - Adapter for an existing or non-existent resource reader - to provide a compatibility .files(). - """ - - class SpecPath(abc.Traversable): - """ - Path tied to a module spec. - Can be read and exposes the resource reader children. 
- """ - - def __init__(self, spec, reader): - self._spec = spec - self._reader = reader - - def iterdir(self): - if not self._reader: - return iter(()) - return iter( - CompatibilityFiles.ChildPath(self._reader, path) - for path in self._reader.contents() - ) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - if not self._reader: - return CompatibilityFiles.OrphanPath(other) - return CompatibilityFiles.ChildPath(self._reader, other) - - @property - def name(self): - return self._spec.name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs) - - class ChildPath(abc.Traversable): - """ - Path tied to a resource reader child. - Can be read but doesn't expose any meaningful children. - """ - - def __init__(self, reader, name): - self._reader = reader - self._name = name - - def iterdir(self): - return iter(()) - - def is_file(self): - return self._reader.is_resource(self.name) - - def is_dir(self): - return not self.is_file() - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(self.name, other) - - @property - def name(self): - return self._name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper( - self._reader.open_resource(self.name), mode, *args, **kwargs - ) - - class OrphanPath(abc.Traversable): - """ - Orphan path, not tied to a module spec or resource reader. - Can't be read and doesn't expose any meaningful children. - """ - - def __init__(self, *path_parts): - if len(path_parts) < 1: - raise ValueError('Need at least one path part to construct a path') - self._path = path_parts - - def iterdir(self): - return iter(()) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(*self._path, other) - - @property - def name(self): - return self._path[-1] - - def open(self, mode='r', *args, **kwargs): - raise FileNotFoundError("Can't open orphan path") - - def __init__(self, spec): - self.spec = spec - - @property - def _reader(self): - with suppress(AttributeError): - return self.spec.loader.get_resource_reader(self.spec.name) - - def _native(self): - """ - Return the native reader if it supports files(). - """ - reader = self._reader - return reader if hasattr(reader, 'files') else self - - def __getattr__(self, attr): - return getattr(self._reader, attr) - - def files(self): - return CompatibilityFiles.SpecPath(self.spec, self._reader) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. - """ - return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/site-packages/importlib_resources/_common.py b/site-packages/importlib_resources/_common.py deleted file mode 100644 index 3c6de1c..0000000 --- a/site-packages/importlib_resources/_common.py +++ /dev/null @@ -1,207 +0,0 @@ -import os -import pathlib -import tempfile -import functools -import contextlib -import types -import importlib -import inspect -import warnings -import itertools - -from typing import Union, Optional, cast -from .abc import ResourceReader, Traversable - -from ._compat import wrap_spec - -Package = Union[types.ModuleType, str] -Anchor = Package - - -def package_to_anchor(func): - """ - Replace 'package' parameter as 'anchor' and warn about the change. - - Other errors should fall through. 
- - >>> files('a', 'b') - Traceback (most recent call last): - TypeError: files() takes from 0 to 1 positional arguments but 2 were given - """ - undefined = object() - - @functools.wraps(func) - def wrapper(anchor=undefined, package=undefined): - if package is not undefined: - if anchor is not undefined: - return func(anchor, package) - warnings.warn( - "First parameter to files is renamed to 'anchor'", - DeprecationWarning, - stacklevel=2, - ) - return func(package) - elif anchor is undefined: - return func() - return func(anchor) - - return wrapper - - -@package_to_anchor -def files(anchor: Optional[Anchor] = None) -> Traversable: - """ - Get a Traversable resource for an anchor. - """ - return from_package(resolve(anchor)) - - -def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: - """ - Return the package's loader if it's a ResourceReader. - """ - # We can't use - # a issubclass() check here because apparently abc.'s __subclasscheck__() - # hook wants to create a weak reference to the object, but - # zipimport.zipimporter does not support weak references, resulting in a - # TypeError. That seems terrible. - spec = package.__spec__ - reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore - if reader is None: - return None - return reader(spec.name) # type: ignore - - -@functools.singledispatch -def resolve(cand: Optional[Anchor]) -> types.ModuleType: - return cast(types.ModuleType, cand) - - -@resolve.register -def _(cand: str) -> types.ModuleType: - return importlib.import_module(cand) - - -@resolve.register -def _(cand: None) -> types.ModuleType: - return resolve(_infer_caller().f_globals['__name__']) - - -def _infer_caller(): - """ - Walk the stack and find the frame of the first caller not in this module. - """ - - def is_this_file(frame_info): - return frame_info.filename == __file__ - - def is_wrapper(frame_info): - return frame_info.function == 'wrapper' - - not_this_file = itertools.filterfalse(is_this_file, inspect.stack()) - # also exclude 'wrapper' due to singledispatch in the call stack - callers = itertools.filterfalse(is_wrapper, not_this_file) - return next(callers).frame - - -def from_package(package: types.ModuleType): - """ - Return a Traversable object for the given package. - - """ - spec = wrap_spec(package) - reader = spec.loader.get_resource_reader(spec.name) - return reader.files() - - -@contextlib.contextmanager -def _tempfile( - reader, - suffix='', - # gh-93353: Keep a reference to call os.remove() in late Python - # finalization. - *, - _os_remove=os.remove, -): - # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' - # blocks due to the need to close the temporary file to work on Windows - # properly. - fd, raw_path = tempfile.mkstemp(suffix=suffix) - try: - try: - os.write(fd, reader()) - finally: - os.close(fd) - del reader - yield pathlib.Path(raw_path) - finally: - try: - _os_remove(raw_path) - except FileNotFoundError: - pass - - -def _temp_file(path): - return _tempfile(path.read_bytes, suffix=path.name) - - -def _is_present_dir(path: Traversable) -> bool: - """ - Some Traversables implement ``is_dir()`` to raise an - exception (i.e. ``FileNotFoundError``) when the - directory doesn't exist. This function wraps that call - to always return a boolean and only return True - if there's a dir and it exists. 
- """ - with contextlib.suppress(FileNotFoundError): - return path.is_dir() - return False - - -@functools.singledispatch -def as_file(path): - """ - Given a Traversable object, return that object as a - path on the local file system in a context manager. - """ - return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) - - -@as_file.register(pathlib.Path) -@contextlib.contextmanager -def _(path): - """ - Degenerate behavior for pathlib.Path objects. - """ - yield path - - -@contextlib.contextmanager -def _temp_path(dir: tempfile.TemporaryDirectory): - """ - Wrap tempfile.TemporyDirectory to return a pathlib object. - """ - with dir as result: - yield pathlib.Path(result) - - -@contextlib.contextmanager -def _temp_dir(path): - """ - Given a traversable dir, recursively replicate the whole tree - to the file system in a context manager. - """ - assert path.is_dir() - with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: - yield _write_contents(temp_dir, path) - - -def _write_contents(target, source): - child = target.joinpath(source.name) - if source.is_dir(): - child.mkdir() - for item in source.iterdir(): - _write_contents(child, item) - else: - child.write_bytes(source.read_bytes()) - return child diff --git a/site-packages/importlib_resources/_compat.py b/site-packages/importlib_resources/_compat.py deleted file mode 100644 index d7e9f0d..0000000 --- a/site-packages/importlib_resources/_compat.py +++ /dev/null @@ -1,126 +0,0 @@ -# flake8: noqa - -import abc -import os -import sys -import pathlib -import warnings -from contextlib import suppress -from typing import Union - - -if sys.version_info >= (3, 10): - from zipfile import Path as ZipPath # type: ignore -else: - from zipp import Path as ZipPath # type: ignore - - -try: - from typing import runtime_checkable # type: ignore -except ImportError: - - def runtime_checkable(cls): # type: ignore - return cls - - -try: - from typing import Protocol # type: ignore -except ImportError: - Protocol = abc.ABC # type: ignore - - -class TraversableResourcesLoader: - """ - Adapt loaders to provide TraversableResources and other - compatibility. - - Used primarily for Python 3.9 and earlier where the native - loaders do not yet implement TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - @property - def path(self): - return self.spec.origin - - def get_resource_reader(self, name): - from . 
import readers, _adapters - - def _zip_reader(spec): - with suppress(AttributeError): - return readers.ZipReader(spec.loader, spec.name) - - def _namespace_reader(spec): - with suppress(AttributeError, ValueError): - return readers.NamespaceReader(spec.submodule_search_locations) - - def _available_reader(spec): - with suppress(AttributeError): - return spec.loader.get_resource_reader(spec.name) - - def _native_reader(spec): - reader = _available_reader(spec) - return reader if hasattr(reader, 'files') else None - - def _file_reader(spec): - try: - path = pathlib.Path(self.path) - except TypeError: - return None - if path.exists(): - return readers.FileReader(self) - - return ( - # local ZipReader if a zip module - _zip_reader(self.spec) - or - # local NamespaceReader if a namespace module - _namespace_reader(self.spec) - or - # local FileReader - _file_reader(self.spec) - or - # native reader if it supplies 'files' - _native_reader(self.spec) - or - # fallback - adapt the spec ResourceReader to TraversableReader - _adapters.CompatibilityFiles(self.spec) - ) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. - - Supersedes _adapters.wrap_spec to use TraversableResourcesLoader - from above for older Python compatibility (<3.10). - """ - from . import _adapters - - return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) - - -if sys.version_info >= (3, 9): - StrPath = Union[str, os.PathLike[str]] -else: - # PathLike is only subscriptable at runtime in 3.9+ - StrPath = Union[str, "os.PathLike[str]"] - - -def ensure_traversable(path): - """ - Convert deprecated string arguments to traversables (pathlib.Path). - """ - if not isinstance(path, str): - return path - - warnings.warn( - "String arguments are deprecated. Pass a Traversable instead.", - DeprecationWarning, - stacklevel=3, - ) - - return pathlib.Path(path) diff --git a/site-packages/importlib_resources/_itertools.py b/site-packages/importlib_resources/_itertools.py deleted file mode 100644 index 7b775ef..0000000 --- a/site-packages/importlib_resources/_itertools.py +++ /dev/null @@ -1,38 +0,0 @@ -# from more_itertools 9.0 -def only(iterable, default=None, too_long=None): - """If *iterable* has only one item, return it. - If it has zero items, return *default*. - If it has more than one item, raise the exception given by *too_long*, - which is ``ValueError`` by default. - >>> only([], default='missing') - 'missing' - >>> only([1]) - 1 - >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 1, 2, - and perhaps more.' - >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError - Note that :func:`only` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check - iterable contents less destructively. 
- """ - it = iter(iterable) - first_value = next(it, default) - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value diff --git a/site-packages/importlib_resources/abc.py b/site-packages/importlib_resources/abc.py deleted file mode 100644 index 23b6aea..0000000 --- a/site-packages/importlib_resources/abc.py +++ /dev/null @@ -1,170 +0,0 @@ -import abc -import io -import itertools -import pathlib -from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional - -from ._compat import runtime_checkable, Protocol, StrPath - - -__all__ = ["ResourceReader", "Traversable", "TraversableResources"] - - -class ResourceReader(metaclass=abc.ABCMeta): - """Abstract base class for loaders to provide resource reading support.""" - - @abc.abstractmethod - def open_resource(self, resource: Text) -> BinaryIO: - """Return an opened, file-like object for binary reading. - - The 'resource' argument is expected to represent only a file name. - If the resource cannot be found, FileNotFoundError is raised. - """ - # This deliberately raises FileNotFoundError instead of - # NotImplementedError so that if this method is accidentally called, - # it'll still do the right thing. - raise FileNotFoundError - - @abc.abstractmethod - def resource_path(self, resource: Text) -> Text: - """Return the file system path to the specified resource. - - The 'resource' argument is expected to represent only a file name. - If the resource does not exist on the file system, raise - FileNotFoundError. - """ - # This deliberately raises FileNotFoundError instead of - # NotImplementedError so that if this method is accidentally called, - # it'll still do the right thing. - raise FileNotFoundError - - @abc.abstractmethod - def is_resource(self, path: Text) -> bool: - """Return True if the named 'path' is a resource. - - Files are resources, directories are not. - """ - raise FileNotFoundError - - @abc.abstractmethod - def contents(self) -> Iterable[str]: - """Return an iterable of entries in `package`.""" - raise FileNotFoundError - - -class TraversalError(Exception): - pass - - -@runtime_checkable -class Traversable(Protocol): - """ - An object with a subset of pathlib.Path methods suitable for - traversing directories and opening files. - - Any exceptions that occur when accessing the backing resource - may propagate unaltered. - """ - - @abc.abstractmethod - def iterdir(self) -> Iterator["Traversable"]: - """ - Yield Traversable objects in self - """ - - def read_bytes(self) -> bytes: - """ - Read contents of self as bytes - """ - with self.open('rb') as strm: - return strm.read() - - def read_text(self, encoding: Optional[str] = None) -> str: - """ - Read contents of self as text - """ - with self.open(encoding=encoding) as strm: - return strm.read() - - @abc.abstractmethod - def is_dir(self) -> bool: - """ - Return True if self is a directory - """ - - @abc.abstractmethod - def is_file(self) -> bool: - """ - Return True if self is a file - """ - - def joinpath(self, *descendants: StrPath) -> "Traversable": - """ - Return Traversable resolved with any descendants applied. - - Each descendant should be a path segment relative to self - and each may contain multiple levels separated by - ``posixpath.sep`` (``/``). 
- """ - if not descendants: - return self - names = itertools.chain.from_iterable( - path.parts for path in map(pathlib.PurePosixPath, descendants) - ) - target = next(names) - matches = ( - traversable for traversable in self.iterdir() if traversable.name == target - ) - try: - match = next(matches) - except StopIteration: - raise TraversalError( - "Target not found during traversal.", target, list(names) - ) - return match.joinpath(*names) - - def __truediv__(self, child: StrPath) -> "Traversable": - """ - Return Traversable child in self - """ - return self.joinpath(child) - - @abc.abstractmethod - def open(self, mode='r', *args, **kwargs): - """ - mode may be 'r' or 'rb' to open as text or binary. Return a handle - suitable for reading (same as pathlib.Path.open). - - When opening as text, accepts encoding parameters such as those - accepted by io.TextIOWrapper. - """ - - @property - @abc.abstractmethod - def name(self) -> str: - """ - The base name of this object without any parent references. - """ - - -class TraversableResources(ResourceReader): - """ - The required interface for providing traversable - resources. - """ - - @abc.abstractmethod - def files(self) -> "Traversable": - """Return a Traversable object for the loaded package.""" - - def open_resource(self, resource: StrPath) -> io.BufferedReader: - return self.files().joinpath(resource).open('rb') - - def resource_path(self, resource: Any) -> NoReturn: - raise FileNotFoundError(resource) - - def is_resource(self, path: StrPath) -> bool: - return self.files().joinpath(path).is_file() - - def contents(self) -> Iterator[str]: - return (item.name for item in self.files().iterdir()) diff --git a/site-packages/importlib_resources/py.typed b/site-packages/importlib_resources/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources/readers.py b/site-packages/importlib_resources/readers.py deleted file mode 100644 index 1e2d1ba..0000000 --- a/site-packages/importlib_resources/readers.py +++ /dev/null @@ -1,172 +0,0 @@ -import collections -import contextlib -import itertools -import pathlib -import operator -import re - -from . import abc - -from ._itertools import only -from ._compat import ZipPath, ensure_traversable - - -def remove_duplicates(items): - return iter(collections.OrderedDict.fromkeys(items)) - - -class FileReader(abc.TraversableResources): - def __init__(self, loader): - self.path = pathlib.Path(loader.path).parent - - def resource_path(self, resource): - """ - Return the file system path to prevent - `resources.path()` from creating a temporary - copy. - """ - return str(self.path.joinpath(resource)) - - def files(self): - return self.path - - -class ZipReader(abc.TraversableResources): - def __init__(self, loader, module): - _, _, name = module.rpartition('.') - self.prefix = loader.prefix.replace('\\', '/') + name + '/' - self.archive = loader.archive - - def open_resource(self, resource): - try: - return super().open_resource(resource) - except KeyError as exc: - raise FileNotFoundError(exc.args[0]) - - def is_resource(self, path): - """ - Workaround for `zipfile.Path.is_file` returning true - for non-existent paths. - """ - target = self.files().joinpath(path) - return target.is_file() and target.exists() - - def files(self): - return ZipPath(self.archive, self.prefix) - - -class MultiplexedPath(abc.Traversable): - """ - Given a series of Traversable objects, implement a merged - version of the interface across all objects. 
Useful for - namespace packages which may be multihomed at a single - name. - """ - - def __init__(self, *paths): - self._paths = list(map(ensure_traversable, remove_duplicates(paths))) - if not self._paths: - message = 'MultiplexedPath must contain at least one path' - raise FileNotFoundError(message) - if not all(path.is_dir() for path in self._paths): - raise NotADirectoryError('MultiplexedPath only supports directories') - - def iterdir(self): - children = (child for path in self._paths for child in path.iterdir()) - by_name = operator.attrgetter('name') - groups = itertools.groupby(sorted(children, key=by_name), key=by_name) - return map(self._follow, (locs for name, locs in groups)) - - def read_bytes(self): - raise FileNotFoundError(f'{self} is not a file') - - def read_text(self, *args, **kwargs): - raise FileNotFoundError(f'{self} is not a file') - - def is_dir(self): - return True - - def is_file(self): - return False - - def joinpath(self, *descendants): - try: - return super().joinpath(*descendants) - except abc.TraversalError: - # One of the paths did not resolve (a directory does not exist). - # Just return something that will not exist. - return self._paths[0].joinpath(*descendants) - - @classmethod - def _follow(cls, children): - """ - Construct a MultiplexedPath if needed. - - If children contains a sole element, return it. - Otherwise, return a MultiplexedPath of the items. - Unless one of the items is not a Directory, then return the first. - """ - subdirs, one_dir, one_file = itertools.tee(children, 3) - - try: - return only(one_dir) - except ValueError: - try: - return cls(*subdirs) - except NotADirectoryError: - return next(one_file) - - def open(self, *args, **kwargs): - raise FileNotFoundError(f'{self} is not a file') - - @property - def name(self): - return self._paths[0].name - - def __repr__(self): - paths = ', '.join(f"'{path}'" for path in self._paths) - return f'MultiplexedPath({paths})' - - -class NamespaceReader(abc.TraversableResources): - def __init__(self, namespace_path): - if 'NamespacePath' not in str(namespace_path): - raise ValueError('Invalid path') - self.path = MultiplexedPath(*map(self._resolve, namespace_path)) - - @classmethod - def _resolve(cls, path_str) -> abc.Traversable: - r""" - Given an item from a namespace path, resolve it to a Traversable. - - path_str might be a directory on the filesystem or a path to a - zipfile plus the path within the zipfile, e.g. ``/foo/bar`` or - ``/foo/baz.zip/inner_dir`` or ``foo\baz.zip\inner_dir\sub``. - """ - (dir,) = (cand for cand in cls._candidate_paths(path_str) if cand.is_dir()) - return dir - - @classmethod - def _candidate_paths(cls, path_str): - yield pathlib.Path(path_str) - yield from cls._resolve_zip_path(path_str) - - @staticmethod - def _resolve_zip_path(path_str): - for match in reversed(list(re.finditer(r'[\\/]', path_str))): - with contextlib.suppress( - FileNotFoundError, IsADirectoryError, PermissionError - ): - inner = path_str[match.end() :].replace('\\', '/') + '/' - yield ZipPath(path_str[: match.start()], inner.lstrip('/')) - - def resource_path(self, resource): - """ - Return the file system path to prevent - `resources.path()` from creating a temporary - copy. 
- """ - return str(self.path.joinpath(resource)) - - def files(self): - return self.path diff --git a/site-packages/importlib_resources/simple.py b/site-packages/importlib_resources/simple.py deleted file mode 100644 index 7770c92..0000000 --- a/site-packages/importlib_resources/simple.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Interface adapters for low-level readers. -""" - -import abc -import io -import itertools -from typing import BinaryIO, List - -from .abc import Traversable, TraversableResources - - -class SimpleReader(abc.ABC): - """ - The minimum, low-level interface required from a resource - provider. - """ - - @property - @abc.abstractmethod - def package(self) -> str: - """ - The name of the package for which this reader loads resources. - """ - - @abc.abstractmethod - def children(self) -> List['SimpleReader']: - """ - Obtain an iterable of SimpleReader for available - child containers (e.g. directories). - """ - - @abc.abstractmethod - def resources(self) -> List[str]: - """ - Obtain available named resources for this virtual package. - """ - - @abc.abstractmethod - def open_binary(self, resource: str) -> BinaryIO: - """ - Obtain a File-like for a named resource. - """ - - @property - def name(self): - return self.package.split('.')[-1] - - -class ResourceContainer(Traversable): - """ - Traversable container for a package's resources via its reader. - """ - - def __init__(self, reader: SimpleReader): - self.reader = reader - - def is_dir(self): - return True - - def is_file(self): - return False - - def iterdir(self): - files = (ResourceHandle(self, name) for name in self.reader.resources) - dirs = map(ResourceContainer, self.reader.children()) - return itertools.chain(files, dirs) - - def open(self, *args, **kwargs): - raise IsADirectoryError() - - -class ResourceHandle(Traversable): - """ - Handle to a named resource in a ResourceReader. - """ - - def __init__(self, parent: ResourceContainer, name: str): - self.parent = parent - self.name = name # type: ignore - - def is_file(self): - return True - - def is_dir(self): - return False - - def open(self, mode='r', *args, **kwargs): - stream = self.parent.reader.open_binary(self.name) - if 'b' not in mode: - stream = io.TextIOWrapper(*args, **kwargs) - return stream - - def joinpath(self, name): - raise RuntimeError("Cannot traverse into a resource") - - -class TraversableReader(TraversableResources, SimpleReader): - """ - A TraversableResources based on SimpleReader. Resource providers - may derive from this class to provide the TraversableResources - interface by supplying the SimpleReader interface. 
- """ - - def files(self): - return ResourceContainer(self) diff --git a/site-packages/importlib_resources/tests/__init__.py b/site-packages/importlib_resources/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources/tests/_compat.py b/site-packages/importlib_resources/tests/_compat.py deleted file mode 100644 index e7bf06d..0000000 --- a/site-packages/importlib_resources/tests/_compat.py +++ /dev/null @@ -1,32 +0,0 @@ -import os - - -try: - from test.support import import_helper # type: ignore -except ImportError: - # Python 3.9 and earlier - class import_helper: # type: ignore - from test.support import ( - modules_setup, - modules_cleanup, - DirsOnSysPath, - CleanImport, - ) - - -try: - from test.support import os_helper # type: ignore -except ImportError: - # Python 3.9 compat - class os_helper: # type:ignore - from test.support import temp_dir - - -try: - # Python 3.10 - from test.support.os_helper import unlink -except ImportError: - from test.support import unlink as _unlink - - def unlink(target): - return _unlink(os.fspath(target)) diff --git a/site-packages/importlib_resources/tests/_path.py b/site-packages/importlib_resources/tests/_path.py deleted file mode 100644 index 1f97c96..0000000 --- a/site-packages/importlib_resources/tests/_path.py +++ /dev/null @@ -1,56 +0,0 @@ -import pathlib -import functools - -from typing import Dict, Union - - -#### -# from jaraco.path 3.4.1 - -FilesSpec = Dict[str, Union[str, bytes, 'FilesSpec']] # type: ignore - - -def build(spec: FilesSpec, prefix=pathlib.Path()): - """ - Build a set of files/directories, as described by the spec. - - Each key represents a pathname, and the value represents - the content. Content may be a nested directory. - - >>> spec = { - ... 'README.txt': "A README file", - ... "foo": { - ... "__init__.py": "", - ... "bar": { - ... "__init__.py": "", - ... }, - ... "baz.py": "# Some code", - ... } - ... 
} - >>> target = getfixture('tmp_path') - >>> build(spec, target) - >>> target.joinpath('foo/baz.py').read_text(encoding='utf-8') - '# Some code' - """ - for name, contents in spec.items(): - create(contents, pathlib.Path(prefix) / name) - - -@functools.singledispatch -def create(content: Union[str, bytes, FilesSpec], path): - path.mkdir(exist_ok=True) - build(content, prefix=path) # type: ignore - - -@create.register -def _(content: bytes, path): - path.write_bytes(content) - - -@create.register -def _(content: str, path): - path.write_text(content, encoding='utf-8') - - -# end from jaraco.path -#### diff --git a/site-packages/importlib_resources/tests/data01/__init__.py b/site-packages/importlib_resources/tests/data01/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources/tests/data01/binary.file b/site-packages/importlib_resources/tests/data01/binary.file deleted file mode 100644 index eaf36c1daccfdf325514461cd1a2ffbc139b5464..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmZQzWMT#Y01f~L diff --git a/site-packages/importlib_resources/tests/data01/subdirectory/__init__.py b/site-packages/importlib_resources/tests/data01/subdirectory/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources/tests/data01/subdirectory/binary.file b/site-packages/importlib_resources/tests/data01/subdirectory/binary.file deleted file mode 100644 index eaf36c1daccfdf325514461cd1a2ffbc139b5464..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmZQzWMT#Y01f~L diff --git a/site-packages/importlib_resources/tests/data01/utf-16.file b/site-packages/importlib_resources/tests/data01/utf-16.file deleted file mode 100644 index 2cb772295ef4b480a8d83725bd5006a0236d8f68..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44 ucmezW&x0YAAqNQa8FUyF7(y9B7~B|i84MZBfV^^`Xc15@g+Y;liva-T)Ce>H diff --git a/site-packages/importlib_resources/tests/data01/utf-8.file b/site-packages/importlib_resources/tests/data01/utf-8.file deleted file mode 100644 index 1c0132a..0000000 --- a/site-packages/importlib_resources/tests/data01/utf-8.file +++ /dev/null @@ -1 +0,0 @@ -Hello, UTF-8 world! 
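For readers skimming this removal: the deleted importlib_resources sources above expose a small resource-access API (files(), Traversable.joinpath()/read_text(), and as_file(), as defined in _common.py and abc.py earlier in this diff). The sketch below is illustrative only and is not part of the patch itself; the package name "mypkg" and the resource name "data.txt" are hypothetical placeholders.

# Illustrative sketch only; "mypkg" and "data.txt" are hypothetical names.
import importlib_resources as resources


def read_embedded_text() -> str:
    # files() returns a Traversable anchored at the package; joinpath()
    # and read_text() give pathlib-style access, even for zip-backed packages.
    return resources.files("mypkg").joinpath("data.txt").read_text(encoding="utf-8")


def with_real_path() -> None:
    # as_file() yields a concrete filesystem path (a temporary copy when the
    # package lives inside a zip) for APIs that require an os-level path.
    target = resources.files("mypkg") / "data.txt"
    with resources.as_file(target) as path:
        print(path.read_bytes()[:16])
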
diff --git a/site-packages/importlib_resources/tests/data02/__init__.py b/site-packages/importlib_resources/tests/data02/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources/tests/data02/one/__init__.py b/site-packages/importlib_resources/tests/data02/one/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources/tests/data02/one/resource1.txt b/site-packages/importlib_resources/tests/data02/one/resource1.txt deleted file mode 100644 index 61a813e..0000000 --- a/site-packages/importlib_resources/tests/data02/one/resource1.txt +++ /dev/null @@ -1 +0,0 @@ -one resource diff --git a/site-packages/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt b/site-packages/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt deleted file mode 100644 index 48f587a..0000000 --- a/site-packages/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt +++ /dev/null @@ -1 +0,0 @@ -a resource \ No newline at end of file diff --git a/site-packages/importlib_resources/tests/data02/two/__init__.py b/site-packages/importlib_resources/tests/data02/two/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/site-packages/importlib_resources/tests/data02/two/resource2.txt b/site-packages/importlib_resources/tests/data02/two/resource2.txt deleted file mode 100644 index a80ce46..0000000 --- a/site-packages/importlib_resources/tests/data02/two/resource2.txt +++ /dev/null @@ -1 +0,0 @@ -two resource diff --git a/site-packages/importlib_resources/tests/namespacedata01/binary.file b/site-packages/importlib_resources/tests/namespacedata01/binary.file deleted file mode 100644 index eaf36c1daccfdf325514461cd1a2ffbc139b5464..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmZQzWMT#Y01f~L diff --git a/site-packages/importlib_resources/tests/namespacedata01/utf-16.file b/site-packages/importlib_resources/tests/namespacedata01/utf-16.file deleted file mode 100644 index 2cb772295ef4b480a8d83725bd5006a0236d8f68..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44 ucmezW&x0YAAqNQa8FUyF7(y9B7~B|i84MZBfV^^`Xc15@g+Y;liva-T)Ce>H diff --git a/site-packages/importlib_resources/tests/namespacedata01/utf-8.file b/site-packages/importlib_resources/tests/namespacedata01/utf-8.file deleted file mode 100644 index 1c0132a..0000000 --- a/site-packages/importlib_resources/tests/namespacedata01/utf-8.file +++ /dev/null @@ -1 +0,0 @@ -Hello, UTF-8 world! diff --git a/site-packages/importlib_resources/tests/test_compatibilty_files.py b/site-packages/importlib_resources/tests/test_compatibilty_files.py deleted file mode 100644 index 13ad0df..0000000 --- a/site-packages/importlib_resources/tests/test_compatibilty_files.py +++ /dev/null @@ -1,104 +0,0 @@ -import io -import unittest - -import importlib_resources as resources - -from importlib_resources._adapters import ( - CompatibilityFiles, - wrap_spec, -) - -from . 
import util - - -class CompatibilityFilesTests(unittest.TestCase): - @property - def package(self): - bytes_data = io.BytesIO(b'Hello, world!') - return util.create_package( - file=bytes_data, - path='some_path', - contents=('a', 'b', 'c'), - ) - - @property - def files(self): - return resources.files(self.package) - - def test_spec_path_iter(self): - self.assertEqual( - sorted(path.name for path in self.files.iterdir()), - ['a', 'b', 'c'], - ) - - def test_child_path_iter(self): - self.assertEqual(list((self.files / 'a').iterdir()), []) - - def test_orphan_path_iter(self): - self.assertEqual(list((self.files / 'a' / 'a').iterdir()), []) - self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), []) - - def test_spec_path_is(self): - self.assertFalse(self.files.is_file()) - self.assertFalse(self.files.is_dir()) - - def test_child_path_is(self): - self.assertTrue((self.files / 'a').is_file()) - self.assertFalse((self.files / 'a').is_dir()) - - def test_orphan_path_is(self): - self.assertFalse((self.files / 'a' / 'a').is_file()) - self.assertFalse((self.files / 'a' / 'a').is_dir()) - self.assertFalse((self.files / 'a' / 'a' / 'a').is_file()) - self.assertFalse((self.files / 'a' / 'a' / 'a').is_dir()) - - def test_spec_path_name(self): - self.assertEqual(self.files.name, 'testingpackage') - - def test_child_path_name(self): - self.assertEqual((self.files / 'a').name, 'a') - - def test_orphan_path_name(self): - self.assertEqual((self.files / 'a' / 'b').name, 'b') - self.assertEqual((self.files / 'a' / 'b' / 'c').name, 'c') - - def test_spec_path_open(self): - self.assertEqual(self.files.read_bytes(), b'Hello, world!') - self.assertEqual(self.files.read_text(encoding='utf-8'), 'Hello, world!') - - def test_child_path_open(self): - self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!') - self.assertEqual( - (self.files / 'a').read_text(encoding='utf-8'), 'Hello, world!' - ) - - def test_orphan_path_open(self): - with self.assertRaises(FileNotFoundError): - (self.files / 'a' / 'b').read_bytes() - with self.assertRaises(FileNotFoundError): - (self.files / 'a' / 'b' / 'c').read_bytes() - - def test_open_invalid_mode(self): - with self.assertRaises(ValueError): - self.files.open('0') - - def test_orphan_path_invalid(self): - with self.assertRaises(ValueError): - CompatibilityFiles.OrphanPath() - - def test_wrap_spec(self): - spec = wrap_spec(self.package) - self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles) - - -class CompatibilityFilesNoReaderTests(unittest.TestCase): - @property - def package(self): - return util.create_package_from_loader(None) - - @property - def files(self): - return resources.files(self.package) - - def test_spec_path_joinpath(self): - self.assertIsInstance(self.files / 'a', CompatibilityFiles.OrphanPath) diff --git a/site-packages/importlib_resources/tests/test_contents.py b/site-packages/importlib_resources/tests/test_contents.py deleted file mode 100644 index 525568e..0000000 --- a/site-packages/importlib_resources/tests/test_contents.py +++ /dev/null @@ -1,43 +0,0 @@ -import unittest -import importlib_resources as resources - -from . import data01 -from . 
import util - - -class ContentsTests: - expected = { - '__init__.py', - 'binary.file', - 'subdirectory', - 'utf-16.file', - 'utf-8.file', - } - - def test_contents(self): - contents = {path.name for path in resources.files(self.data).iterdir()} - assert self.expected <= contents - - -class ContentsDiskTests(ContentsTests, unittest.TestCase): - def setUp(self): - self.data = data01 - - -class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase): - pass - - -class ContentsNamespaceTests(ContentsTests, unittest.TestCase): - expected = { - # no __init__ because of namespace design - # no subdirectory as incidental difference in fixture - 'binary.file', - 'utf-16.file', - 'utf-8.file', - } - - def setUp(self): - from . import namespacedata01 - - self.data = namespacedata01 diff --git a/site-packages/importlib_resources/tests/test_custom.py b/site-packages/importlib_resources/tests/test_custom.py deleted file mode 100644 index e85ddd6..0000000 --- a/site-packages/importlib_resources/tests/test_custom.py +++ /dev/null @@ -1,45 +0,0 @@ -import unittest -import contextlib -import pathlib - -import importlib_resources as resources -from ..abc import TraversableResources, ResourceReader -from . import util -from ._compat import os_helper - - -class SimpleLoader: - """ - A simple loader that only implements a resource reader. - """ - - def __init__(self, reader: ResourceReader): - self.reader = reader - - def get_resource_reader(self, package): - return self.reader - - -class MagicResources(TraversableResources): - """ - Magically returns the resources at path. - """ - - def __init__(self, path: pathlib.Path): - self.path = path - - def files(self): - return self.path - - -class CustomTraversableResourcesTests(unittest.TestCase): - def setUp(self): - self.fixtures = contextlib.ExitStack() - self.addCleanup(self.fixtures.close) - - def test_custom_loader(self): - temp_dir = self.fixtures.enter_context(os_helper.temp_dir()) - loader = SimpleLoader(MagicResources(temp_dir)) - pkg = util.create_package_from_loader(loader) - files = resources.files(pkg) - assert files is temp_dir diff --git a/site-packages/importlib_resources/tests/test_files.py b/site-packages/importlib_resources/tests/test_files.py deleted file mode 100644 index 197a063..0000000 --- a/site-packages/importlib_resources/tests/test_files.py +++ /dev/null @@ -1,112 +0,0 @@ -import typing -import textwrap -import unittest -import warnings -import importlib -import contextlib - -import importlib_resources as resources -from ..abc import Traversable -from . import data01 -from . import util -from . import _path -from ._compat import os_helper, import_helper - - -@contextlib.contextmanager -def suppress_known_deprecation(): - with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter('default', category=DeprecationWarning) - yield ctx - - -class FilesTests: - def test_read_bytes(self): - files = resources.files(self.data) - actual = files.joinpath('utf-8.file').read_bytes() - assert actual == b'Hello, UTF-8 world!\n' - - def test_read_text(self): - files = resources.files(self.data) - actual = files.joinpath('utf-8.file').read_text(encoding='utf-8') - assert actual == 'Hello, UTF-8 world!\n' - - @unittest.skipUnless( - hasattr(typing, 'runtime_checkable'), - "Only suitable when typing supports runtime_checkable", - ) - def test_traversable(self): - assert isinstance(resources.files(self.data), Traversable) - - def test_old_parameter(self): - """ - Files used to take a 'package' parameter. 
Make sure anyone - passing by name is still supported. - """ - with suppress_known_deprecation(): - resources.files(package=self.data) - - -class OpenDiskTests(FilesTests, unittest.TestCase): - def setUp(self): - self.data = data01 - - -class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase): - pass - - -class OpenNamespaceTests(FilesTests, unittest.TestCase): - def setUp(self): - from . import namespacedata01 - - self.data = namespacedata01 - - -class SiteDir: - def setUp(self): - self.fixtures = contextlib.ExitStack() - self.addCleanup(self.fixtures.close) - self.site_dir = self.fixtures.enter_context(os_helper.temp_dir()) - self.fixtures.enter_context(import_helper.DirsOnSysPath(self.site_dir)) - self.fixtures.enter_context(import_helper.CleanImport()) - - -class ModulesFilesTests(SiteDir, unittest.TestCase): - def test_module_resources(self): - """ - A module can have resources found adjacent to the module. - """ - spec = { - 'mod.py': '', - 'res.txt': 'resources are the best', - } - _path.build(spec, self.site_dir) - import mod - - actual = resources.files(mod).joinpath('res.txt').read_text(encoding='utf-8') - assert actual == spec['res.txt'] - - -class ImplicitContextFilesTests(SiteDir, unittest.TestCase): - def test_implicit_files(self): - """ - Without any parameter, files() will infer the location as the caller. - """ - spec = { - 'somepkg': { - '__init__.py': textwrap.dedent( - """ - import importlib_resources as res - val = res.files().joinpath('res.txt').read_text(encoding='utf-8') - """ - ), - 'res.txt': 'resources are the best', - }, - } - _path.build(spec, self.site_dir) - assert importlib.import_module('somepkg').val == 'resources are the best' - - -if __name__ == '__main__': - unittest.main() diff --git a/site-packages/importlib_resources/tests/test_open.py b/site-packages/importlib_resources/tests/test_open.py deleted file mode 100644 index 83b737d..0000000 --- a/site-packages/importlib_resources/tests/test_open.py +++ /dev/null @@ -1,85 +0,0 @@ -import unittest - -import importlib_resources as resources -from . import data01 -from . import util - - -class CommonBinaryTests(util.CommonTests, unittest.TestCase): - def execute(self, package, path): - target = resources.files(package).joinpath(path) - with target.open('rb'): - pass - - -class CommonTextTests(util.CommonTests, unittest.TestCase): - def execute(self, package, path): - target = resources.files(package).joinpath(path) - with target.open(encoding='utf-8'): - pass - - -class OpenTests: - def test_open_binary(self): - target = resources.files(self.data) / 'binary.file' - with target.open('rb') as fp: - result = fp.read() - self.assertEqual(result, b'\x00\x01\x02\x03') - - def test_open_text_default_encoding(self): - target = resources.files(self.data) / 'utf-8.file' - with target.open(encoding='utf-8') as fp: - result = fp.read() - self.assertEqual(result, 'Hello, UTF-8 world!\n') - - def test_open_text_given_encoding(self): - target = resources.files(self.data) / 'utf-16.file' - with target.open(encoding='utf-16', errors='strict') as fp: - result = fp.read() - self.assertEqual(result, 'Hello, UTF-16 world!\n') - - def test_open_text_with_errors(self): - """ - Raises UnicodeError without the 'errors' argument. 
- """ - target = resources.files(self.data) / 'utf-16.file' - with target.open(encoding='utf-8', errors='strict') as fp: - self.assertRaises(UnicodeError, fp.read) - with target.open(encoding='utf-8', errors='ignore') as fp: - result = fp.read() - self.assertEqual( - result, - 'H\x00e\x00l\x00l\x00o\x00,\x00 ' - '\x00U\x00T\x00F\x00-\x001\x006\x00 ' - '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00', - ) - - def test_open_binary_FileNotFoundError(self): - target = resources.files(self.data) / 'does-not-exist' - with self.assertRaises(FileNotFoundError): - target.open('rb') - - def test_open_text_FileNotFoundError(self): - target = resources.files(self.data) / 'does-not-exist' - with self.assertRaises(FileNotFoundError): - target.open(encoding='utf-8') - - -class OpenDiskTests(OpenTests, unittest.TestCase): - def setUp(self): - self.data = data01 - - -class OpenDiskNamespaceTests(OpenTests, unittest.TestCase): - def setUp(self): - from . import namespacedata01 - - self.data = namespacedata01 - - -class OpenZipTests(OpenTests, util.ZipSetup, unittest.TestCase): - pass - - -if __name__ == '__main__': - unittest.main() diff --git a/site-packages/importlib_resources/tests/test_path.py b/site-packages/importlib_resources/tests/test_path.py deleted file mode 100644 index 7cb2000..0000000 --- a/site-packages/importlib_resources/tests/test_path.py +++ /dev/null @@ -1,69 +0,0 @@ -import io -import unittest - -import importlib_resources as resources -from . import data01 -from . import util - - -class CommonTests(util.CommonTests, unittest.TestCase): - def execute(self, package, path): - with resources.as_file(resources.files(package).joinpath(path)): - pass - - -class PathTests: - def test_reading(self): - """ - Path should be readable. - - Test also implicitly verifies the returned object is a pathlib.Path - instance. - """ - target = resources.files(self.data) / 'utf-8.file' - with resources.as_file(target) as path: - self.assertTrue(path.name.endswith("utf-8.file"), repr(path)) - # pathlib.Path.read_text() was introduced in Python 3.5. - with path.open('r', encoding='utf-8') as file: - text = file.read() - self.assertEqual('Hello, UTF-8 world!\n', text) - - -class PathDiskTests(PathTests, unittest.TestCase): - data = data01 - - def test_natural_path(self): - """ - Guarantee the internal implementation detail that - file-system-backed resources do not get the tempdir - treatment. - """ - target = resources.files(self.data) / 'utf-8.file' - with resources.as_file(target) as path: - assert 'data' in str(path) - - -class PathMemoryTests(PathTests, unittest.TestCase): - def setUp(self): - file = io.BytesIO(b'Hello, UTF-8 world!\n') - self.addCleanup(file.close) - self.data = util.create_package( - file=file, path=FileNotFoundError("package exists only in memory") - ) - self.data.__spec__.origin = None - self.data.__spec__.has_location = False - - -class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase): - def test_remove_in_context_manager(self): - """ - It is not an error if the file that was temporarily stashed on the - file system is removed inside the `with` stanza. 
- """ - target = resources.files(self.data) / 'utf-8.file' - with resources.as_file(target) as path: - path.unlink() - - -if __name__ == '__main__': - unittest.main() diff --git a/site-packages/importlib_resources/tests/test_read.py b/site-packages/importlib_resources/tests/test_read.py deleted file mode 100644 index 5b83221..0000000 --- a/site-packages/importlib_resources/tests/test_read.py +++ /dev/null @@ -1,80 +0,0 @@ -import unittest -import importlib_resources as resources - -from . import data01 -from . import util -from importlib import import_module - - -class CommonBinaryTests(util.CommonTests, unittest.TestCase): - def execute(self, package, path): - resources.files(package).joinpath(path).read_bytes() - - -class CommonTextTests(util.CommonTests, unittest.TestCase): - def execute(self, package, path): - resources.files(package).joinpath(path).read_text(encoding='utf-8') - - -class ReadTests: - def test_read_bytes(self): - result = resources.files(self.data).joinpath('binary.file').read_bytes() - self.assertEqual(result, b'\0\1\2\3') - - def test_read_text_default_encoding(self): - result = ( - resources.files(self.data) - .joinpath('utf-8.file') - .read_text(encoding='utf-8') - ) - self.assertEqual(result, 'Hello, UTF-8 world!\n') - - def test_read_text_given_encoding(self): - result = ( - resources.files(self.data) - .joinpath('utf-16.file') - .read_text(encoding='utf-16') - ) - self.assertEqual(result, 'Hello, UTF-16 world!\n') - - def test_read_text_with_errors(self): - """ - Raises UnicodeError without the 'errors' argument. - """ - target = resources.files(self.data) / 'utf-16.file' - self.assertRaises(UnicodeError, target.read_text, encoding='utf-8') - result = target.read_text(encoding='utf-8', errors='ignore') - self.assertEqual( - result, - 'H\x00e\x00l\x00l\x00o\x00,\x00 ' - '\x00U\x00T\x00F\x00-\x001\x006\x00 ' - '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00', - ) - - -class ReadDiskTests(ReadTests, unittest.TestCase): - data = data01 - - -class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase): - def test_read_submodule_resource(self): - submodule = import_module('data01.subdirectory') - result = resources.files(submodule).joinpath('binary.file').read_bytes() - self.assertEqual(result, b'\0\1\2\3') - - def test_read_submodule_resource_by_name(self): - result = ( - resources.files('data01.subdirectory').joinpath('binary.file').read_bytes() - ) - self.assertEqual(result, b'\0\1\2\3') - - -class ReadNamespaceTests(ReadTests, unittest.TestCase): - def setUp(self): - from . 
import namespacedata01 - - self.data = namespacedata01 - - -if __name__ == '__main__': - unittest.main() diff --git a/site-packages/importlib_resources/tests/test_reader.py b/site-packages/importlib_resources/tests/test_reader.py deleted file mode 100644 index a1eadb2..0000000 --- a/site-packages/importlib_resources/tests/test_reader.py +++ /dev/null @@ -1,143 +0,0 @@ -import os.path -import sys -import pathlib -import unittest - -from importlib import import_module -from importlib_resources.readers import MultiplexedPath, NamespaceReader - - -class MultiplexedPathTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.folder = pathlib.Path(__file__).parent / 'namespacedata01' - - def test_init_no_paths(self): - with self.assertRaises(FileNotFoundError): - MultiplexedPath() - - def test_init_file(self): - with self.assertRaises(NotADirectoryError): - MultiplexedPath(self.folder / 'binary.file') - - def test_iterdir(self): - contents = {path.name for path in MultiplexedPath(self.folder).iterdir()} - try: - contents.remove('__pycache__') - except (KeyError, ValueError): - pass - self.assertEqual(contents, {'binary.file', 'utf-16.file', 'utf-8.file'}) - - def test_iterdir_duplicate(self): - data01 = pathlib.Path(__file__).parent.joinpath('data01') - contents = { - path.name for path in MultiplexedPath(self.folder, data01).iterdir() - } - for remove in ('__pycache__', '__init__.pyc'): - try: - contents.remove(remove) - except (KeyError, ValueError): - pass - self.assertEqual( - contents, - {'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'}, - ) - - def test_is_dir(self): - self.assertEqual(MultiplexedPath(self.folder).is_dir(), True) - - def test_is_file(self): - self.assertEqual(MultiplexedPath(self.folder).is_file(), False) - - def test_open_file(self): - path = MultiplexedPath(self.folder) - with self.assertRaises(FileNotFoundError): - path.read_bytes() - with self.assertRaises(FileNotFoundError): - path.read_text() - with self.assertRaises(FileNotFoundError): - path.open() - - def test_join_path(self): - data01 = pathlib.Path(__file__).parent.joinpath('data01') - prefix = str(data01.parent) - path = MultiplexedPath(self.folder, data01) - self.assertEqual( - str(path.joinpath('binary.file'))[len(prefix) + 1 :], - os.path.join('namespacedata01', 'binary.file'), - ) - self.assertEqual( - str(path.joinpath('subdirectory'))[len(prefix) + 1 :], - os.path.join('data01', 'subdirectory'), - ) - self.assertEqual( - str(path.joinpath('imaginary'))[len(prefix) + 1 :], - os.path.join('namespacedata01', 'imaginary'), - ) - self.assertEqual(path.joinpath(), path) - - def test_join_path_compound(self): - path = MultiplexedPath(self.folder) - assert not path.joinpath('imaginary/foo.py').exists() - - def test_join_path_common_subdir(self): - data01 = pathlib.Path(__file__).parent.joinpath('data01') - data02 = pathlib.Path(__file__).parent.joinpath('data02') - prefix = str(data01.parent) - path = MultiplexedPath(data01, data02) - self.assertIsInstance(path.joinpath('subdirectory'), MultiplexedPath) - self.assertEqual( - str(path.joinpath('subdirectory', 'subsubdir'))[len(prefix) + 1 :], - os.path.join('data02', 'subdirectory', 'subsubdir'), - ) - - def test_repr(self): - self.assertEqual( - repr(MultiplexedPath(self.folder)), - f"MultiplexedPath('{self.folder}')", - ) - - def test_name(self): - self.assertEqual( - MultiplexedPath(self.folder).name, - os.path.basename(self.folder), - ) - - -class NamespaceReaderTest(unittest.TestCase): - site_dir = 
str(pathlib.Path(__file__).parent) - - @classmethod - def setUpClass(cls): - sys.path.append(cls.site_dir) - - @classmethod - def tearDownClass(cls): - sys.path.remove(cls.site_dir) - - def test_init_error(self): - with self.assertRaises(ValueError): - NamespaceReader(['path1', 'path2']) - - def test_resource_path(self): - namespacedata01 = import_module('namespacedata01') - reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations) - - root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01')) - self.assertEqual( - reader.resource_path('binary.file'), os.path.join(root, 'binary.file') - ) - self.assertEqual( - reader.resource_path('imaginary'), os.path.join(root, 'imaginary') - ) - - def test_files(self): - namespacedata01 = import_module('namespacedata01') - reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations) - root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01')) - self.assertIsInstance(reader.files(), MultiplexedPath) - self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')") - - -if __name__ == '__main__': - unittest.main() diff --git a/site-packages/importlib_resources/tests/test_resource.py b/site-packages/importlib_resources/tests/test_resource.py deleted file mode 100644 index de7d734..0000000 --- a/site-packages/importlib_resources/tests/test_resource.py +++ /dev/null @@ -1,221 +0,0 @@ -import sys -import unittest -import importlib_resources as resources -import pathlib - -from . import data01 -from . import util -from importlib import import_module - - -class ResourceTests: - # Subclasses are expected to set the `data` attribute. - - def test_is_file_exists(self): - target = resources.files(self.data) / 'binary.file' - self.assertTrue(target.is_file()) - - def test_is_file_missing(self): - target = resources.files(self.data) / 'not-a-file' - self.assertFalse(target.is_file()) - - def test_is_dir(self): - target = resources.files(self.data) / 'subdirectory' - self.assertFalse(target.is_file()) - self.assertTrue(target.is_dir()) - - -class ResourceDiskTests(ResourceTests, unittest.TestCase): - def setUp(self): - self.data = data01 - - -class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase): - pass - - -def names(traversable): - return {item.name for item in traversable.iterdir()} - - -class ResourceLoaderTests(unittest.TestCase): - def test_resource_contents(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C'] - ) - self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'}) - - def test_is_file(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] - ) - self.assertTrue(resources.files(package).joinpath('B').is_file()) - - def test_is_dir(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] - ) - self.assertTrue(resources.files(package).joinpath('D').is_dir()) - - def test_resource_missing(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] - ) - self.assertFalse(resources.files(package).joinpath('Z').is_file()) - - -class ResourceCornerCaseTests(unittest.TestCase): - def test_package_has_no_reader_fallback(self): - """ - Test odd ball packages which: - # 1. Do not have a ResourceReader as a loader - # 2. Are not on the file system - # 3. 
Are not in a zip file - """ - module = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C'] - ) - # Give the module a dummy loader. - module.__loader__ = object() - # Give the module a dummy origin. - module.__file__ = '/path/which/shall/not/be/named' - module.__spec__.loader = module.__loader__ - module.__spec__.origin = module.__file__ - self.assertFalse(resources.files(module).joinpath('A').is_file()) - - -class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase): - ZIP_MODULE = 'data01' - - def test_is_submodule_resource(self): - submodule = import_module('data01.subdirectory') - self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file()) - - def test_read_submodule_resource_by_name(self): - self.assertTrue( - resources.files('data01.subdirectory').joinpath('binary.file').is_file() - ) - - def test_submodule_contents(self): - submodule = import_module('data01.subdirectory') - self.assertEqual( - names(resources.files(submodule)), {'__init__.py', 'binary.file'} - ) - - def test_submodule_contents_by_name(self): - self.assertEqual( - names(resources.files('data01.subdirectory')), - {'__init__.py', 'binary.file'}, - ) - - def test_as_file_directory(self): - with resources.as_file(resources.files('data01')) as data: - assert data.name == 'data01' - assert data.is_dir() - assert data.joinpath('subdirectory').is_dir() - assert len(list(data.iterdir())) - assert not data.parent.exists() - - -class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase): - ZIP_MODULE = 'data02' - - def test_unrelated_contents(self): - """ - Test thata zip with two unrelated subpackages return - distinct resources. Ref python/importlib_resources#44. - """ - self.assertEqual( - names(resources.files('data02.one')), - {'__init__.py', 'resource1.txt'}, - ) - self.assertEqual( - names(resources.files('data02.two')), - {'__init__.py', 'resource2.txt'}, - ) - - -class DeletingZipsTest(util.ZipSetupBase, unittest.TestCase): - """Having accessed resources in a zip file should not keep an open - reference to the zip. - """ - - def test_iterdir_does_not_keep_open(self): - [item.name for item in resources.files('data01').iterdir()] - - def test_is_file_does_not_keep_open(self): - resources.files('data01').joinpath('binary.file').is_file() - - def test_is_file_failure_does_not_keep_open(self): - resources.files('data01').joinpath('not-present').is_file() - - @unittest.skip("Desired but not supported.") - def test_as_file_does_not_keep_open(self): # pragma: no cover - resources.as_file(resources.files('data01') / 'binary.file') - - def test_entered_path_does_not_keep_open(self): - """ - Mimic what certifi does on import to make its bundle - available for the process duration. 
- """ - resources.as_file(resources.files('data01') / 'binary.file').__enter__() - - def test_read_binary_does_not_keep_open(self): - resources.files('data01').joinpath('binary.file').read_bytes() - - def test_read_text_does_not_keep_open(self): - resources.files('data01').joinpath('utf-8.file').read_text(encoding='utf-8') - - -class ResourceFromNamespaceTests: - def test_is_submodule_resource(self): - self.assertTrue( - resources.files(import_module('namespacedata01')) - .joinpath('binary.file') - .is_file() - ) - - def test_read_submodule_resource_by_name(self): - self.assertTrue( - resources.files('namespacedata01').joinpath('binary.file').is_file() - ) - - def test_submodule_contents(self): - contents = names(resources.files(import_module('namespacedata01'))) - try: - contents.remove('__pycache__') - except KeyError: - pass - self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'}) - - def test_submodule_contents_by_name(self): - contents = names(resources.files('namespacedata01')) - try: - contents.remove('__pycache__') - except KeyError: - pass - self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'}) - - -class ResourceFromNamespaceDiskTests(ResourceFromNamespaceTests, unittest.TestCase): - site_dir = str(pathlib.Path(__file__).parent) - - @classmethod - def setUpClass(cls): - sys.path.append(cls.site_dir) - - @classmethod - def tearDownClass(cls): - sys.path.remove(cls.site_dir) - - -class ResourceFromNamespaceZipTests( - util.ZipSetupBase, - ResourceFromNamespaceTests, - unittest.TestCase, -): - ZIP_MODULE = 'namespacedata01' - - -if __name__ == '__main__': - unittest.main() diff --git a/site-packages/importlib_resources/tests/util.py b/site-packages/importlib_resources/tests/util.py deleted file mode 100644 index 066f411..0000000 --- a/site-packages/importlib_resources/tests/util.py +++ /dev/null @@ -1,165 +0,0 @@ -import abc -import importlib -import io -import sys -import types -import pathlib -import contextlib - -from . import data01 -from ..abc import ResourceReader -from ._compat import import_helper, os_helper -from . import zip as zip_ - - -from importlib.machinery import ModuleSpec - - -class Reader(ResourceReader): - def __init__(self, **kwargs): - vars(self).update(kwargs) - - def get_resource_reader(self, package): - return self - - def open_resource(self, path): - self._path = path - if isinstance(self.file, Exception): - raise self.file - return self.file - - def resource_path(self, path_): - self._path = path_ - if isinstance(self.path, Exception): - raise self.path - return self.path - - def is_resource(self, path_): - self._path = path_ - if isinstance(self.path, Exception): - raise self.path - - def part(entry): - return entry.split('/') - - return any( - len(parts) == 1 and parts[0] == path_ for parts in map(part, self._contents) - ) - - def contents(self): - if isinstance(self.path, Exception): - raise self.path - yield from self._contents - - -def create_package_from_loader(loader, is_package=True): - name = 'testingpackage' - module = types.ModuleType(name) - spec = ModuleSpec(name, loader, origin='does-not-exist', is_package=is_package) - module.__spec__ = spec - module.__loader__ = loader - return module - - -def create_package(file=None, path=None, is_package=True, contents=()): - return create_package_from_loader( - Reader(file=file, path=path, _contents=contents), - is_package, - ) - - -class CommonTests(metaclass=abc.ABCMeta): - """ - Tests shared by test_open, test_path, and test_read. 
- """ - - @abc.abstractmethod - def execute(self, package, path): - """ - Call the pertinent legacy API function (e.g. open_text, path) - on package and path. - """ - - def test_package_name(self): - """ - Passing in the package name should succeed. - """ - self.execute(data01.__name__, 'utf-8.file') - - def test_package_object(self): - """ - Passing in the package itself should succeed. - """ - self.execute(data01, 'utf-8.file') - - def test_string_path(self): - """ - Passing in a string for the path should succeed. - """ - path = 'utf-8.file' - self.execute(data01, path) - - def test_pathlib_path(self): - """ - Passing in a pathlib.PurePath object for the path should succeed. - """ - path = pathlib.PurePath('utf-8.file') - self.execute(data01, path) - - def test_importing_module_as_side_effect(self): - """ - The anchor package can already be imported. - """ - del sys.modules[data01.__name__] - self.execute(data01.__name__, 'utf-8.file') - - def test_missing_path(self): - """ - Attempting to open or read or request the path for a - non-existent path should succeed if open_resource - can return a viable data stream. - """ - bytes_data = io.BytesIO(b'Hello, world!') - package = create_package(file=bytes_data, path=FileNotFoundError()) - self.execute(package, 'utf-8.file') - self.assertEqual(package.__loader__._path, 'utf-8.file') - - def test_extant_path(self): - # Attempting to open or read or request the path when the - # path does exist should still succeed. Does not assert - # anything about the result. - bytes_data = io.BytesIO(b'Hello, world!') - # any path that exists - path = __file__ - package = create_package(file=bytes_data, path=path) - self.execute(package, 'utf-8.file') - self.assertEqual(package.__loader__._path, 'utf-8.file') - - def test_useless_loader(self): - package = create_package(file=FileNotFoundError(), path=FileNotFoundError()) - with self.assertRaises(FileNotFoundError): - self.execute(package, 'utf-8.file') - - -class ZipSetupBase: - ZIP_MODULE = 'data01' - - def setUp(self): - self.fixtures = contextlib.ExitStack() - self.addCleanup(self.fixtures.close) - - modules = import_helper.modules_setup() - self.addCleanup(import_helper.modules_cleanup, *modules) - - temp_dir = self.fixtures.enter_context(os_helper.temp_dir()) - modules = pathlib.Path(temp_dir) / 'zipped modules.zip' - src_path = pathlib.Path(__file__).parent.joinpath(self.ZIP_MODULE) - self.fixtures.enter_context( - import_helper.DirsOnSysPath(str(zip_.make_zip_file(src_path, modules))) - ) - - self.data = importlib.import_module(self.ZIP_MODULE) - - -class ZipSetup(ZipSetupBase): - pass diff --git a/site-packages/importlib_resources/tests/zip.py b/site-packages/importlib_resources/tests/zip.py deleted file mode 100644 index 962195a..0000000 --- a/site-packages/importlib_resources/tests/zip.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Generate zip test data files. -""" - -import contextlib -import os -import pathlib -import zipfile - -import zipp - - -def make_zip_file(src, dst): - """ - Zip the files in src into a new zipfile at dst. 
- """ - with zipfile.ZipFile(dst, 'w') as zf: - for src_path, rel in walk(src): - dst_name = src.name / pathlib.PurePosixPath(rel.as_posix()) - zf.write(src_path, dst_name) - zipp.CompleteDirs.inject(zf) - return dst - - -def walk(datapath): - for dirpath, dirnames, filenames in os.walk(datapath): - with contextlib.suppress(ValueError): - dirnames.remove('__pycache__') - for filename in filenames: - res = pathlib.Path(dirpath) / filename - rel = res.relative_to(datapath) - yield res, rel diff --git a/site-packages/markdown_it/.DS_Store b/site-packages/markdown_it/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ 
Simply import `AsyncOpenAI` instead of `OpenAI` and use `await` with each API call: ```python +import asyncio from openai import AsyncOpenAI client = AsyncOpenAI( @@ -94,17 +99,16 @@ client = AsyncOpenAI( ) -async def main(): - completion = await client.chat.completions.create( - model="gpt-3.5-turbo", +async def main() -> None: + chat_completion = await client.chat.completions.create( messages=[ { "role": "user", "content": "Say this is a test", } ], + model="gpt-3.5-turbo", ) - print(completion.choices) asyncio.run(main()) @@ -121,9 +125,9 @@ from openai import OpenAI client = OpenAI() -stream = client.completions.create( - prompt="Say this is a test", - model="text-davinci-003", +stream = client.chat.completions.create( + model="gpt-4", + messages=[{"role": "user", "content": "Say this is a test"}], stream=True, ) for part in stream: @@ -137,9 +141,9 @@ from openai import AsyncOpenAI client = AsyncOpenAI() -stream = await client.completions.create( +stream = await client.chat.completions.create( prompt="Say this is a test", - model="text-davinci-003", + messages=[{"role": "user", "content": "Say this is a test"}], stream=True, ) async for part in stream: @@ -272,7 +276,16 @@ from openai import OpenAI client = OpenAI() -client.files.list() +completion = client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "Can you generate an example json object describing a fruit?", + } + ], + model="gpt-3.5-turbo", + response_format={"type": "json_object"}, +) ``` ## File Uploads @@ -355,13 +368,13 @@ client = OpenAI( # Or, configure per-request: client.with_options(max_retries=5).chat.completions.create( - model="gpt-3.5-turbo", messages=[ { "role": "user", "content": "How can I get the name of the current day in Node.js?", } ], + model="gpt-3.5-turbo", ) ``` @@ -386,13 +399,13 @@ client = OpenAI( # Override per-request: client.with_options(timeout=5 * 1000).chat.completions.create( - model="gpt-3.5-turbo", messages=[ { "role": "user", "content": "How can I list all files in a directory using Python?", } ], + model="gpt-3.5-turbo", ) ``` @@ -434,19 +447,18 @@ from openai import OpenAI client = OpenAI() response = client.chat.completions.with_raw_response.create( messages=[{ - "content": "string", - "role": "system", + "role": "user", + "content": "Say this is a test", }], model="gpt-3.5-turbo", ) - print(response.headers.get('X-My-Header')) completion = response.parse() # get the object that `chat.completions.create()` would have returned -print(completion.choices) +print(completion) ``` -These methods return an [`APIResponse`](https://github.com/openai/openai-python/tree/v1/src/openai/_response.py) object. +These methods return an [`APIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object. ### Configuring the HTTP client @@ -478,7 +490,7 @@ By default the library closes underlying HTTP connections whenever the client is To use this library with [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview), use the `AzureOpenAI` class instead of the `OpenAI` class. -> [!IMPORTANT] +> [!IMPORTANT] > The Azure API shape differs from the core API shape which means that the static types for responses / params > won't always be correct. 
diff --git a/site-packages/openai-1.2.4.dist-info/RECORD b/site-packages/openai-1.2.4.dist-info/RECORD new file mode 100644 index 0000000..cf9dfe6 --- /dev/null +++ b/site-packages/openai-1.2.4.dist-info/RECORD @@ -0,0 +1,392 @@ +../../../bin/openai,sha256=CPXrhKqOEYUHqlDvRInx7n7aO03SrYDT2A7LuC2BB0k,240 +openai-1.2.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +openai-1.2.4.dist-info/METADATA,sha256=6CqFlYNuDCiX8Xv0uWylMasRmV5Wfx5BsdVS9iDg5ms,16739 +openai-1.2.4.dist-info/RECORD,, +openai-1.2.4.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 +openai-1.2.4.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43 +openai-1.2.4.dist-info/licenses/LICENSE,sha256=Sa73AcxOcVUuCfzKf62Fu6hK2T7keXrLoEPQPTErZTY,11336 +openai/__init__.py,sha256=QtuSgqYev3r9hstiMS8jd-E3dOrfg3VSYU4qt1rOsSI,9504 +openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30 +openai/__pycache__/__init__.cpython-39.pyc,, +openai/__pycache__/__main__.cpython-39.pyc,, +openai/__pycache__/_base_client.cpython-39.pyc,, +openai/__pycache__/_client.cpython-39.pyc,, +openai/__pycache__/_compat.cpython-39.pyc,, +openai/__pycache__/_constants.cpython-39.pyc,, +openai/__pycache__/_exceptions.cpython-39.pyc,, +openai/__pycache__/_files.cpython-39.pyc,, +openai/__pycache__/_models.cpython-39.pyc,, +openai/__pycache__/_module_client.cpython-39.pyc,, +openai/__pycache__/_qs.cpython-39.pyc,, +openai/__pycache__/_resource.cpython-39.pyc,, +openai/__pycache__/_response.cpython-39.pyc,, +openai/__pycache__/_streaming.cpython-39.pyc,, +openai/__pycache__/_types.cpython-39.pyc,, +openai/__pycache__/_version.cpython-39.pyc,, +openai/__pycache__/pagination.cpython-39.pyc,, +openai/__pycache__/version.cpython-39.pyc,, +openai/_base_client.py,sha256=nz5HjrLH_RjeH9I4MCfP0i5BgGITNX1-dObibJBlSE0,55906 +openai/_client.py,sha256=eAz_Q9PsiunRQa25ZB5thElR_vrlgRn6amv8xbKZwHQ,19221 +openai/_compat.py,sha256=pofRsaU2XT43Ij6aMRr3XfKgIVIJfAEUeElVFaE4cTA,5102 +openai/_constants.py,sha256=DAtwhh5HUKB7BBQiqJvZ93P2ME0x5-vFzy8hhvl3WEA,315 +openai/_exceptions.py,sha256=jLL5mzvGA1U_JF6OqTGlGkiSu_klBgrJxW6ed0Dw9Kg,3575 +openai/_extras/__init__.py,sha256=PpHi-jjIVLWp4G1f5AfNQRJnoCxKiYU9CVMAyN-Crwo,131 +openai/_extras/__pycache__/__init__.cpython-39.pyc,, +openai/_extras/__pycache__/_common.cpython-39.pyc,, +openai/_extras/__pycache__/numpy_proxy.cpython-39.pyc,, +openai/_extras/__pycache__/pandas_proxy.cpython-39.pyc,, +openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364 +openai/_extras/numpy_proxy.py,sha256=GVVSyABlGzdtF3xfGc5JkI801IsiN3VkMIYfWs2iVf0,834 +openai/_extras/pandas_proxy.py,sha256=GV_R1ysbgq8y9WMSFQOhxvviz7CFrocj-oweX1v8FlA,672 +openai/_files.py,sha256=Ow1uBpIr0bLnIHxtN-mB4Lsa4uVgNU6wLVOkgqi9Gzc,3470 +openai/_models.py,sha256=4qZNTuHAH2YzEdEcH7SJJUa_6VXGk9ujzvXuH5owvLY,16057 +openai/_module_client.py,sha256=W-lfWtHAs0dg5XEJfIuLaTtfpkjVfIQb2Ve4VZ3fsc8,2702 +openai/_qs.py,sha256=AOkSz4rHtK4YI3ZU_kzea-zpwBUgEY8WniGmTPyEimc,4846 +openai/_resource.py,sha256=F_pXU86CNv_FFBPjW7Pwcr4wOkcBaEEkoeQOGrwLlXY,1070 +openai/_response.py,sha256=zF8CHtqUoheb_aKnYOhSZNZH1jGbeSFI8NwiqCAg6k8,9022 +openai/_streaming.py,sha256=bWbnngbms1v4h3Gc37tEthjE7AQpPYB275V0R8h7urI,7092 +openai/_types.py,sha256=TT4OXPns_DFN4DCtweBdA532qho2hTXl4kWXanzser4,9696 +openai/_utils/__init__.py,sha256=1DSY01oDA6kU6h8nWhj9GcbuHS-GAIiEW-W8JDx6n7c,1848 +openai/_utils/__pycache__/__init__.cpython-39.pyc,, +openai/_utils/__pycache__/_logs.cpython-39.pyc,, 
+openai/_utils/__pycache__/_proxy.cpython-39.pyc,, +openai/_utils/__pycache__/_transform.cpython-39.pyc,, +openai/_utils/__pycache__/_utils.cpython-39.pyc,, +openai/_utils/_logs.py,sha256=sFA_NejuNObTGGbfsXC03I38mrT9HjsgAJx4d3GP0ok,774 +openai/_utils/_proxy.py,sha256=zivv4nHupudnA3kdBqFg9xRLDaObmeY8l97Tcfg6bN4,1573 +openai/_utils/_transform.py,sha256=cqhMpiWUM-xyla39fuPjLhqCwxtMpKro-CLWwBqxFPM,7042 +openai/_utils/_utils.py,sha256=zZzmKMuFXpDqB4i9r-WodSzMFZihbemcwdHPGxYa1q4,11751 +openai/_version.py,sha256=ybiEU_3KscMdMlGxDbwHUnuRtISu_DYwfy1GCzEsowM,125 +openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31 +openai/cli/__pycache__/__init__.cpython-39.pyc,, +openai/cli/__pycache__/_cli.cpython-39.pyc,, +openai/cli/__pycache__/_errors.cpython-39.pyc,, +openai/cli/__pycache__/_models.cpython-39.pyc,, +openai/cli/__pycache__/_progress.cpython-39.pyc,, +openai/cli/__pycache__/_utils.cpython-39.pyc,, +openai/cli/_api/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 +openai/cli/_api/__pycache__/__init__.cpython-39.pyc,, +openai/cli/_api/__pycache__/_main.cpython-39.pyc,, +openai/cli/_api/__pycache__/audio.cpython-39.pyc,, +openai/cli/_api/__pycache__/completions.cpython-39.pyc,, +openai/cli/_api/__pycache__/files.cpython-39.pyc,, +openai/cli/_api/__pycache__/image.cpython-39.pyc,, +openai/cli/_api/__pycache__/models.cpython-39.pyc,, +openai/cli/_api/_main.py,sha256=5yyfLURqCEaAN8B61gHaqVAaYgtyb9Xq0ncQ3P2BAh0,451 +openai/cli/_api/audio.py,sha256=HZDTRZT-qZTMsg7WOm-djCQlf874aSa3lxRvNG27wLM,3347 +openai/cli/_api/chat/__init__.py,sha256=MhFUQH9F6QCtbPMlbsU_DWTd7wc5DSCZ7Wy3FBGVij0,300 +openai/cli/_api/chat/__pycache__/__init__.cpython-39.pyc,, +openai/cli/_api/chat/__pycache__/completions.cpython-39.pyc,, +openai/cli/_api/chat/completions.py,sha256=9Ztetyz7rm0gP5SOPWEcpzFJnJKuIEQit626vOq42bE,5363 +openai/cli/_api/completions.py,sha256=eF0vRA-cAPLhG86tdIXYnfj2od0tQa4l3bdJ-p6eGyQ,6411 +openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345 +openai/cli/_api/image.py,sha256=VKMRqKCHkl4JO7uP7RLDZu8DzF6ddQgpr3n2v9EOEBk,4711 +openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295 +openai/cli/_cli.py,sha256=WxqTnhVVtfzX0z7hV5fcvd3hkihaUgwOWpXOwyCS4Fc,6743 +openai/cli/_errors.py,sha256=OpvFKu_eRPEOGlq30e6zoPVDMdm6ocPyALgwWx4SaeI,482 +openai/cli/_models.py,sha256=tgsldjG216KpwgAZ5pS0sV02FQvONDJU2ElA4kCCiIU,491 +openai/cli/_progress.py,sha256=buv3H5bEevj4sQ4AXQ8qG75NHqewx5hkJFUy4_3rBx0,1399 +openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 +openai/cli/_tools/__pycache__/__init__.cpython-39.pyc,, +openai/cli/_tools/__pycache__/_main.cpython-39.pyc,, +openai/cli/_tools/__pycache__/fine_tunes.cpython-39.pyc,, +openai/cli/_tools/__pycache__/migrate.cpython-39.pyc,, +openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467 +openai/cli/_tools/fine_tunes.py,sha256=RQgYMzifk6S7Y1I1K6huqco2QxmXa7gVUlHl6SrKTSU,1543 +openai/cli/_tools/migrate.py,sha256=_M7Uoi_uh_jLPM_NcZQORf3bZ_paTVpkfH7bVzEIxjs,4875 +openai/cli/_utils.py,sha256=SvhD3IAvxGv8-fxK4wh2h0EVnoai3RVdyqFuRZ9sklc,849 +openai/lib/__pycache__/_old_api.cpython-39.pyc,, +openai/lib/__pycache__/_validators.cpython-39.pyc,, +openai/lib/__pycache__/azure.cpython-39.pyc,, +openai/lib/_old_api.py,sha256=ET_90btesuzdiYlZERxPt2b-cYcA3MlSQyiFFjz2rHE,1706 +openai/lib/_validators.py,sha256=0x2fpN75xPz6oG9y8V2LZbDnBO_xGlfWsJJa8vIm29U,35218 +openai/lib/azure.py,sha256=W2X9t6ThGgPzIoBLzehXixz4F98nbBYvf0lwZBqNYlA,17410 
+openai/pagination.py,sha256=3MCfyXSd_fm-Ixdx6Z19Ob-S2SIoGUVrV3mGnShEuro,2603 +openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +openai/resources/__init__.py,sha256=H1yP0Iagv-YamT8ySJ028Dre8nphLwAdwo0OZI2JVT0,2588 +openai/resources/__pycache__/__init__.cpython-39.pyc,, +openai/resources/__pycache__/completions.cpython-39.pyc,, +openai/resources/__pycache__/edits.cpython-39.pyc,, +openai/resources/__pycache__/embeddings.cpython-39.pyc,, +openai/resources/__pycache__/files.cpython-39.pyc,, +openai/resources/__pycache__/fine_tunes.cpython-39.pyc,, +openai/resources/__pycache__/images.cpython-39.pyc,, +openai/resources/__pycache__/models.cpython-39.pyc,, +openai/resources/__pycache__/moderations.cpython-39.pyc,, +openai/resources/audio/__init__.py,sha256=2nXrKGjWa_ejR51Q1x192sJb5t75oiMuqjhtGB6xsf4,997 +openai/resources/audio/__pycache__/__init__.cpython-39.pyc,, +openai/resources/audio/__pycache__/audio.cpython-39.pyc,, +openai/resources/audio/__pycache__/speech.cpython-39.pyc,, +openai/resources/audio/__pycache__/transcriptions.cpython-39.pyc,, +openai/resources/audio/__pycache__/translations.cpython-39.pyc,, +openai/resources/audio/audio.py,sha256=XAO3wSlSadTFHgiM86ByXw77EbNAJtAHKEPt5bt2ECc,2234 +openai/resources/audio/speech.py,sha256=U7Ve89uiZR07vc162wahUyrOsJyLubUzAvCL7FpiLyk,6285 +openai/resources/audio/transcriptions.py,sha256=2Rzn3FIc4G7s1XXz4bJJ6kIwTYvHXBMUTXHdE3qAXgo,8852 +openai/resources/audio/translations.py,sha256=aU1BKJlsP1l3ZbZkS4JXtJqBxrsbtfDSJ5hvWq5uh10,8000 +openai/resources/beta/__init__.py,sha256=YJnI8dasxS3WbubsUnX08dWgkgzfUDeiZp85pIp3n0c,699 +openai/resources/beta/__pycache__/__init__.cpython-39.pyc,, +openai/resources/beta/__pycache__/beta.cpython-39.pyc,, +openai/resources/beta/assistants/__init__.py,sha256=rHQZg27vg1Wu64WvGnOoOKY5xtkkagOgrlwo0Z0PQGA,491 +openai/resources/beta/assistants/__pycache__/__init__.cpython-39.pyc,, +openai/resources/beta/assistants/__pycache__/assistants.cpython-39.pyc,, +openai/resources/beta/assistants/__pycache__/files.cpython-39.pyc,, +openai/resources/beta/assistants/assistants.py,sha256=CrQBy4_vzjTccVcY65TGfc19JDrFMDz-CznH23jQsKc,27512 +openai/resources/beta/assistants/files.py,sha256=EBzW-hcfetg0z-M7thy4ixp9AfLdlCU9bBUFdXfh-Mo,16555 +openai/resources/beta/beta.py,sha256=KjcRj6mPjkqXgzIWWWHk5L19e2NShfFBvrWMAWcr9vQ,1690 +openai/resources/beta/threads/__init__.py,sha256=KnV1SskwD8RtoxvfXQfrui5xwPc3_n5Py0z1JZG8TOA,681 +openai/resources/beta/threads/__pycache__/__init__.cpython-39.pyc,, +openai/resources/beta/threads/__pycache__/threads.cpython-39.pyc,, +openai/resources/beta/threads/messages/__init__.py,sha256=GuDSUTY9hCu14Ia9WuNXbXoM293bhFxksHdHANHCdOA,473 +openai/resources/beta/threads/messages/__pycache__/__init__.cpython-39.pyc,, +openai/resources/beta/threads/messages/__pycache__/files.cpython-39.pyc,, +openai/resources/beta/threads/messages/__pycache__/messages.cpython-39.pyc,, +openai/resources/beta/threads/messages/files.py,sha256=rVHzquAgO3Hp82jKXgfUAG-Y78hIKGVCFCzksWDcdc8,10106 +openai/resources/beta/threads/messages/messages.py,sha256=F63zHArzKcVwWp7aZyqvPxSjbC61Lsq_lFxt1TS1J0Y,19312 +openai/resources/beta/threads/runs/__init__.py,sha256=JDJA_Qyv3V1kDQ56HdWd3G92m1yc5otzJf8G-B3sd4w,416 +openai/resources/beta/threads/runs/__pycache__/__init__.cpython-39.pyc,, +openai/resources/beta/threads/runs/__pycache__/runs.cpython-39.pyc,, +openai/resources/beta/threads/runs/__pycache__/steps.cpython-39.pyc,, +openai/resources/beta/threads/runs/runs.py,sha256=A5hIbkOnS9ZwB8PfoWR6gYExBTZlC-tMldYeg5AYM8s,26472 
+openai/resources/beta/threads/runs/steps.py,sha256=aKC-jINKL1Er5vJR1xrT1heMvM0DPlsT2iu94k7iZzA,10028 +openai/resources/beta/threads/threads.py,sha256=F4iKA-dTl3UyMRK3i4KvRZMyMMtft2iM5RzXnFG6Ees,21598 +openai/resources/chat/__init__.py,sha256=8zjX-HmXak-XvoPdstIV-uUF49SBHlHzRrbv-WqeqKE,491 +openai/resources/chat/__pycache__/__init__.cpython-39.pyc,, +openai/resources/chat/__pycache__/chat.cpython-39.pyc,, +openai/resources/chat/__pycache__/completions.cpython-39.pyc,, +openai/resources/chat/chat.py,sha256=ysGF5N1Gv6RcWmmqflnfN3rqcUqHjml53j_ruO6a7J0,1337 +openai/resources/chat/completions.py,sha256=sCRl5wy-_iT3Z2Px5_x-CUF1t-sD-P-UFB2Um_C5pAs,63203 +openai/resources/completions.py,sha256=0reJixDgruhC0ZlmhytvMCa7enidIK9bCu_Ovxr7sPY,58434 +openai/resources/edits.py,sha256=pb4DwpU_RD7YEsywNKwY1p5nfpFugZGI4GK1KRowbWo,7801 +openai/resources/embeddings.py,sha256=94YsyLJar5wTvDmQS3ifEBWQbN_MPUUP0vYDqwMh45c,9182 +openai/resources/files.py,sha256=6Nmlrekqn7A5r4DTJszsJMsF2jAhZs3I9p_Xd1Aze7Q,22594 +openai/resources/fine_tunes.py,sha256=tr5P9-SdB01BY36oNoqAZ3L-0u6iK_TEcbUmku8bsOU,37313 +openai/resources/fine_tuning/__init__.py,sha256=r31AXYyHoC42T8WfUrM7tGahQcU4T5MXd3SVxwL2UEE,483 +openai/resources/fine_tuning/__pycache__/__init__.cpython-39.pyc,, +openai/resources/fine_tuning/__pycache__/fine_tuning.cpython-39.pyc,, +openai/resources/fine_tuning/__pycache__/jobs.cpython-39.pyc,, +openai/resources/fine_tuning/fine_tuning.py,sha256=29_e1ff_qDd5gToWNXQBOhYbWkM0e_Z2yWdJnNbm8Tw,1283 +openai/resources/fine_tuning/jobs.py,sha256=NAVpV3uKQ6oREFJQ_8vQhQOZu0EsuHcSQI7viiri4_0,21922 +openai/resources/images.py,sha256=Ud_X0_xdyIHmXcCOKQ2X-9Qrz7I_D5F9D7dDlp0NDek,23001 +openai/resources/models.py,sha256=0ozhFDvubVScm5FmsqHwTA5NF5XksalXjhBoVGxW1uw,8575 +openai/resources/moderations.py,sha256=ZdE9JjsZoch_qULmrRq53qmVUfVeVvljZylfcjBdOkQ,5790 +openai/types/__init__.py,sha256=oLVWb-ZMrKAJMU1kprklR0e87b118Wl0eYcgmSG1vME,2175 +openai/types/__pycache__/__init__.cpython-39.pyc,, +openai/types/__pycache__/completion.cpython-39.pyc,, +openai/types/__pycache__/completion_choice.cpython-39.pyc,, +openai/types/__pycache__/completion_create_params.cpython-39.pyc,, +openai/types/__pycache__/completion_usage.cpython-39.pyc,, +openai/types/__pycache__/create_embedding_response.cpython-39.pyc,, +openai/types/__pycache__/edit.cpython-39.pyc,, +openai/types/__pycache__/edit_create_params.cpython-39.pyc,, +openai/types/__pycache__/embedding.cpython-39.pyc,, +openai/types/__pycache__/embedding_create_params.cpython-39.pyc,, +openai/types/__pycache__/file_content.cpython-39.pyc,, +openai/types/__pycache__/file_create_params.cpython-39.pyc,, +openai/types/__pycache__/file_deleted.cpython-39.pyc,, +openai/types/__pycache__/file_list_params.cpython-39.pyc,, +openai/types/__pycache__/file_object.cpython-39.pyc,, +openai/types/__pycache__/fine_tune.cpython-39.pyc,, +openai/types/__pycache__/fine_tune_create_params.cpython-39.pyc,, +openai/types/__pycache__/fine_tune_event.cpython-39.pyc,, +openai/types/__pycache__/fine_tune_events_list_response.cpython-39.pyc,, +openai/types/__pycache__/fine_tune_list_events_params.cpython-39.pyc,, +openai/types/__pycache__/image.cpython-39.pyc,, +openai/types/__pycache__/image_create_variation_params.cpython-39.pyc,, +openai/types/__pycache__/image_edit_params.cpython-39.pyc,, +openai/types/__pycache__/image_generate_params.cpython-39.pyc,, +openai/types/__pycache__/images_response.cpython-39.pyc,, +openai/types/__pycache__/model.cpython-39.pyc,, +openai/types/__pycache__/model_deleted.cpython-39.pyc,, 
+openai/types/__pycache__/moderation.cpython-39.pyc,, +openai/types/__pycache__/moderation_create_params.cpython-39.pyc,, +openai/types/__pycache__/moderation_create_response.cpython-39.pyc,, +openai/types/audio/__init__.py,sha256=1jElLAc9ZMmKAtRI7zZLhEzRzzt90PevVrBwtUePsqs,479 +openai/types/audio/__pycache__/__init__.cpython-39.pyc,, +openai/types/audio/__pycache__/speech_create_params.cpython-39.pyc,, +openai/types/audio/__pycache__/transcription.cpython-39.pyc,, +openai/types/audio/__pycache__/transcription_create_params.cpython-39.pyc,, +openai/types/audio/__pycache__/translation.cpython-39.pyc,, +openai/types/audio/__pycache__/translation_create_params.cpython-39.pyc,, +openai/types/audio/speech_create_params.py,sha256=UTR1lXNlFhgprCoqW2KEyk-OTpxmJIFDp5mO0KYi99U,1061 +openai/types/audio/transcription.py,sha256=bHHqJzRrdk_p23yvenTGjw0QBnjI3ebem1Ji1IJROOc,164 +openai/types/audio/transcription_create_params.py,sha256=H-OP-3omPi9zE3qkAkU-GOuctMJyqiRn6mg64Jx9Yqc,1694 +openai/types/audio/translation.py,sha256=qOh5RFGsAjX5fid6vFYtdNNO_7QVtAAZWzHwbcNc224,160 +openai/types/audio/translation_create_params.py,sha256=xxG6-xycSd8Zzt6yak1fYUa7BbbwOKxdVzWRbOYUE0A,1404 +openai/types/beta/__init__.py,sha256=rzhcv6NjaFw_q70XgEOnLzamMfwj07gkrrFUGOb6JSw,800 +openai/types/beta/__pycache__/__init__.cpython-39.pyc,, +openai/types/beta/__pycache__/assistant.cpython-39.pyc,, +openai/types/beta/__pycache__/assistant_create_params.cpython-39.pyc,, +openai/types/beta/__pycache__/assistant_deleted.cpython-39.pyc,, +openai/types/beta/__pycache__/assistant_list_params.cpython-39.pyc,, +openai/types/beta/__pycache__/assistant_update_params.cpython-39.pyc,, +openai/types/beta/__pycache__/thread.cpython-39.pyc,, +openai/types/beta/__pycache__/thread_create_and_run_params.cpython-39.pyc,, +openai/types/beta/__pycache__/thread_create_params.cpython-39.pyc,, +openai/types/beta/__pycache__/thread_deleted.cpython-39.pyc,, +openai/types/beta/__pycache__/thread_update_params.cpython-39.pyc,, +openai/types/beta/assistant.py,sha256=184z9uz2704LkY9nWZXh_a_6XbcDyHU8yvhUQfKSc5U,2559 +openai/types/beta/assistant_create_params.py,sha256=iBySbs07eMtetqfYlqgekXKsqxaPaK7Jg61RtFTFx-k,2539 +openai/types/beta/assistant_deleted.py,sha256=fV1wGdYtmF8irj2WRf6eR24JodG0z1rM_M5bwqSvoV4,268 +openai/types/beta/assistant_list_params.py,sha256=gsSRtdvZMiBxaTCvpfXy3LYKzViPl0ysx9a8tDguxf8,1187 +openai/types/beta/assistant_update_params.py,sha256=HV1eoBL1ZxPXqCrDGVvtYvygHgdXz6ornbTXfAfJXXM,2656 +openai/types/beta/assistants/__init__.py,sha256=nPm_I-yeVKKJRYqWzGrf9OoVDDDsg1c0Qj936gIEKkk,356 +openai/types/beta/assistants/__pycache__/__init__.cpython-39.pyc,, +openai/types/beta/assistants/__pycache__/assistant_file.cpython-39.pyc,, +openai/types/beta/assistants/__pycache__/file_create_params.cpython-39.pyc,, +openai/types/beta/assistants/__pycache__/file_delete_response.cpython-39.pyc,, +openai/types/beta/assistants/__pycache__/file_list_params.cpython-39.pyc,, +openai/types/beta/assistants/assistant_file.py,sha256=RLht24Zg-XbpuKZk2aVXFTH0R0_2c8smvfice7Gekxs,554 +openai/types/beta/assistants/file_create_params.py,sha256=J09q5nl2ZGJ4y8ShBJ5ur1PS7XGrNOBSBz0ptzX2-LM,484 +openai/types/beta/assistants/file_delete_response.py,sha256=unPwyynR71X4Ap0nJ4KENk2JGOv65SVLiavUVcLTVAc,278 +openai/types/beta/assistants/file_list_params.py,sha256=J-AQx5Eu1fgMjHOUBl3u1HGEDkANVRrmIUk4x7EXTN8,1177 +openai/types/beta/chat/__init__.py,sha256=WirADquEnYGjBTTLlIC-kNtGf6opfip3z-sKplC_k3Y,89 +openai/types/beta/chat/__pycache__/__init__.cpython-39.pyc,, 
+openai/types/beta/thread.py,sha256=Z_hjXLEiSBmhPNNgn5GKBryOvixYj71oPe1b1JuQmkU,799 +openai/types/beta/thread_create_and_run_params.py,sha256=_0KdAovrcW_oj-AuOq-WZMqbsN-sTRj-vtotDoUVx_M,3722 +openai/types/beta/thread_create_params.py,sha256=U_YyCEhqNkGW_a7_MEmV1udHj5UiLtv3DvIrPxfQeGM,1617 +openai/types/beta/thread_deleted.py,sha256=jrDuKYAP-oKp3-5ZvjoPR4Yr9aZ-VhWbl5vijDdtypA,259 +openai/types/beta/thread_update_params.py,sha256=ElYwOy-_AWDgKH_LRJddQO9saiWMiEMQuDK4MrKMeLQ,554 +openai/types/beta/threads/__init__.py,sha256=hdORnHIhvktMY9Sx5H07D4YsFxqeWKLYDE4jp7mBHI8,1004 +openai/types/beta/threads/__pycache__/__init__.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/message_content_image_file.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/message_content_text.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/message_create_params.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/message_list_params.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/message_update_params.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/required_action_function_tool_call.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/run.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/run_create_params.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/run_list_params.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/run_submit_tool_outputs_params.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/run_update_params.cpython-39.pyc,, +openai/types/beta/threads/__pycache__/thread_message.cpython-39.pyc,, +openai/types/beta/threads/message_content_image_file.py,sha256=OT2xpdVLxorivEtN6wvkBlcA3tl0BBHSp80dknEsJcY,489 +openai/types/beta/threads/message_content_text.py,sha256=e4eIkUfqJDGBfYPCfDHrpA3YDr6fkEnZJLVQ8KaW5Bo,1575 +openai/types/beta/threads/message_create_params.py,sha256=y7Vj9uVZ_61vwPCpyBDOzrQiycIRrBj4Wvk1ZzZ1_B4,1100 +openai/types/beta/threads/message_list_params.py,sha256=GL2Hq8PoEFSbmQ4DVtCtgFSqc8VoFs7ftvVWIZcybkA,1183 +openai/types/beta/threads/message_update_params.py,sha256=Zy46a0o5rxrde_REpNZPmx4G89BMP5DHk1E5cMuUNVw,596 +openai/types/beta/threads/messages/__init__.py,sha256=NXH8z8CPb2Bhr_delxNMpdmzFLxaUnVyz7TX2GdTpbc,206 +openai/types/beta/threads/messages/__pycache__/__init__.cpython-39.pyc,, +openai/types/beta/threads/messages/__pycache__/file_list_params.cpython-39.pyc,, +openai/types/beta/threads/messages/__pycache__/message_file.cpython-39.pyc,, +openai/types/beta/threads/messages/file_list_params.py,sha256=L6pM_ZzxD_N2dPTeuBhe7FzkKbrXL1lF0gmGAjP_lN8,1217 +openai/types/beta/threads/messages/message_file.py,sha256=ZzehFlMaprtmwq4zLx8frhA6APldpexoulNQ4oMxYGU,695 +openai/types/beta/threads/required_action_function_tool_call.py,sha256=yuF4lNm5UEq8sMR96BeSCDD-C3kdQcbcf2A7VbZwVJ0,855 +openai/types/beta/threads/run.py,sha256=a3jZZFoQzd6ymGbaNx1-f9Ph1KerPzH_dLnCiGN7OlU,4451 +openai/types/beta/threads/run_create_params.py,sha256=xGzPIOTGAC9Aiwr0MjR6zAgGEHedWk2ax5gr_TArsBE,2192 +openai/types/beta/threads/run_list_params.py,sha256=TMAVh4Ffwzkx4WW1-L6ezfCB7LY-u2mdLP5phAvxd-A,1175 +openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=_Zvt4gUhFDdHHX0VXPBlH0J8Dj2QAS-i5KqfIaeC_QQ,719 +openai/types/beta/threads/run_update_params.py,sha256=lTdDYumU09-BzihovwaUWy_7em9l6d9DXiabK-j60wM,588 +openai/types/beta/threads/runs/__init__.py,sha256=WI0XUSX8hsPHJ-VhgGUbIeDl1MKvK25J22rUShWM-fs,583 +openai/types/beta/threads/runs/__pycache__/__init__.cpython-39.pyc,, +openai/types/beta/threads/runs/__pycache__/code_tool_call.cpython-39.pyc,, 
+openai/types/beta/threads/runs/__pycache__/function_tool_call.cpython-39.pyc,, +openai/types/beta/threads/runs/__pycache__/message_creation_step_details.cpython-39.pyc,, +openai/types/beta/threads/runs/__pycache__/retrieval_tool_call.cpython-39.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step.cpython-39.pyc,, +openai/types/beta/threads/runs/__pycache__/step_list_params.cpython-39.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_calls_step_details.cpython-39.pyc,, +openai/types/beta/threads/runs/code_tool_call.py,sha256=c_C8t3FkwkYehflYYBWPKKbracIcF48EI4xiYHBHicU,1632 +openai/types/beta/threads/runs/function_tool_call.py,sha256=aN0yWzqSbfcbA7ejKgFwJp3BhaeAFe6Z9_6pZ7Z-t-0,880 +openai/types/beta/threads/runs/message_creation_step_details.py,sha256=3iO8HpT51Vbc6tCWDVAQMJR03Uy0dkPF2G0xnvgH6Ss,474 +openai/types/beta/threads/runs/retrieval_tool_call.py,sha256=X4DgXBggLkbkxNbVy74Cot0TtzWdZv2WKtTXeHawbQY,481 +openai/types/beta/threads/runs/run_step.py,sha256=8uLP0QiJ41l1M-2Vq1mGOKqVh--BGCWo-_UngjuZCac,2809 +openai/types/beta/threads/runs/step_list_params.py,sha256=4RjV9cvUVIESZr7CWkIJbw2T-D9dCmW2uiHCkdh7nRg,1217 +openai/types/beta/threads/runs/tool_calls_step_details.py,sha256=WiZ3no8GpAFT26kBybsEPHnd9TyufhuIZXLDOew-2Ig,736 +openai/types/beta/threads/thread_message.py,sha256=V8-7QgQ-JzcFU_aLKiR0wqhSvRBbGtFY-1QUfnacVJ4,2061 +openai/types/chat/__init__.py,sha256=ovAlrTEycq3BItYHhNKXdyJhDWRMTAN0cueRDC_4zeo,2394 +openai/types/chat/__pycache__/__init__.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_assistant_message_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_chunk.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_image_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_text_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_function_call_option_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_function_message_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_message.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_message_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_message_tool_call.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_message_tool_call_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_named_tool_choice_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_role.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_choice_option_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_message_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_param.cpython-39.pyc,, +openai/types/chat/__pycache__/chat_completion_user_message_param.cpython-39.pyc,, +openai/types/chat/__pycache__/completion_create_params.cpython-39.pyc,, +openai/types/chat/chat_completion.py,sha256=PGa03GuJk9aE7N36-iINbdoO8BUY2wpzpE7aQr_CMf8,1924 +openai/types/chat/chat_completion_assistant_message_param.py,sha256=E2pwYV1Ee4jOECusNOjw2SsjmNZaiwNwEctzwViqaU0,1371 +openai/types/chat/chat_completion_chunk.py,sha256=kVDzKbzSu8blvqUWiA4Z2aF_uytzZPunuY3H1uqq6V0,3687 +openai/types/chat/chat_completion_content_part_image_param.py,sha256=jkaRMkQx7neXIGiUrwgUMOZvlfNSAmKwIpURRzCUkMI,627 
+openai/types/chat/chat_completion_content_part_param.py,sha256=NidYunfVTItCFaIiFvfC0flbsPv6AVxTW5CDbikP-Jk,462 +openai/types/chat/chat_completion_content_part_text_param.py,sha256=_TJtxbHQaU6-I5uEoriVI7rseY7KNdHV9ouA6wJH9f4,396 +openai/types/chat/chat_completion_function_call_option_param.py,sha256=3FGW-vOFJ41UYqiiQujJn-M4uGt2MLHEVfzfWVLS0_I,332 +openai/types/chat/chat_completion_function_message_param.py,sha256=ovNUU-CvjXzzvrJy83bB7JVrr2wfkGOmr265eY4obfw,585 +openai/types/chat/chat_completion_message.py,sha256=hybo4l5hlnhnokxaMj8U33e6m02vLT2zbclzbTJdpYg,1242 +openai/types/chat/chat_completion_message_param.py,sha256=hNVkRJmP29bjIidWBv6KLIaF9360HACjlNPMQ8UacZc,805 +openai/types/chat/chat_completion_message_tool_call.py,sha256=2GS49NL6M9bx3yoFTRrZBtA5bGXMV6Vz87HDLzrVPxE,867 +openai/types/chat/chat_completion_message_tool_call_param.py,sha256=TPTxAapX7imm3Je3IxLYZCyvmkP4QYH0ad0wsk1fLlg,976 +openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=1Hp0hBJri-kGcsbkOnLvb00UMtgupPREWLbumXLGQUw,516 +openai/types/chat/chat_completion_role.py,sha256=na9kKLwNySYF4CDeuJKx8bo1lc20PHxe46oXHLAb8cY,207 +openai/types/chat/chat_completion_system_message_param.py,sha256=i5xLjMeexml0b2p9A4jjCEu6_1XLLDM3B5FWjSaXaQg,479 +openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=h52I05ob9-LzmY9ssGLzsH_Er5-wR0KH85Ud991l1yM,399 +openai/types/chat/chat_completion_tool_message_param.py,sha256=VO1qh8dAwdBs9zSqs5phqY-55SWkZ5lO-j70gl3dFKo,558 +openai/types/chat/chat_completion_tool_param.py,sha256=ez93XyQd_u0lAy40cO5aiJuboCNtvWyAZA8JhKFVURY,452 +openai/types/chat/chat_completion_user_message_param.py,sha256=3g065YLmg0E7J75oT874dJct65c7NOUpfxTyWviEWqI,593 +openai/types/chat/completion_create_params.py,sha256=QZ1tNd0K9-LufeYaL_A-BQiESZzDEcUMIAss6V5KPLE,9560 +openai/types/completion.py,sha256=TWZXMOLfKcZL_kKDgyJAiZ34_nFQiB66ts0svtp02so,1139 +openai/types/completion_choice.py,sha256=s5LDShJYGgkKnlaIhz23vSIF-4sGho9qRh6d0-Wllss,925 +openai/types/completion_create_params.py,sha256=jl3P4mOj9nOkRQr72Qy2mAu-MFFQiqnIa217DCvY094,7552 +openai/types/completion_usage.py,sha256=H1onC3vuVy5NKSUlW8FBfVzl_PfgFyrB_ytro80ZAjM,401 +openai/types/create_embedding_response.py,sha256=quvWyAE48qV_pwTyWTBT7OO-t1AF0zZaMUI8n-PPtCI,765 +openai/types/edit.py,sha256=77ccmAyQgNEaYbSQTvX-gFgeamaU_g0DHv6E3Jt9tt0,1129 +openai/types/edit_create_params.py,sha256=mjV0iQomNFtIQqfadCI6Sggqp7wYQGb7GiQ-yhKNSGs,1454 +openai/types/embedding.py,sha256=EPMqrP_OjLMuiTv0XcLTizicilcdgAw16akbdWD7xIc,604 +openai/types/embedding_create_params.py,sha256=EXRdsBD4I48EuaCGxJLuLiBE0wEaeJdvSltThQwb7E0,1564 +openai/types/file_content.py,sha256=GuNtqtZAuHFXS0uIJbTpOvzx_RePrE-lnH3FvNw4dJ8,100 +openai/types/file_create_params.py,sha256=4F6XpTDTRGXZjusxXbU-82rvxFAUju04IC6PywtDTYU,840 +openai/types/file_deleted.py,sha256=cJQWjVK1EiFj3kwV4ndQnF5Mlqv7ZSXNJtSwEWilL-A,244 +openai/types/file_list_params.py,sha256=0IpTyqCAMcMl-Q2W1v-4J_xpoCdSmh1zvv_p-X0OmEQ,277 +openai/types/file_object.py,sha256=owuX6HgYMquwzCif65pNSUV-5dtYs09wtjk0ONnfvME,1193 +openai/types/fine_tune.py,sha256=MYk77Cx_8V-W4nXTXWcK_tj4YnPkGr2OHQ6sR6r7wiU,2758 +openai/types/fine_tune_create_params.py,sha256=yyMgO5R3jhvAWyTr4HvkTFTItjdnUHhl52UPavqIyfU,5591 +openai/types/fine_tune_event.py,sha256=VDORrpwH-cII7PDOLGtgKjvzEihoTiAmu6PR9EVL8nU,282 +openai/types/fine_tune_events_list_response.py,sha256=o71CP6X6TnvTMOWk7S4R3OEBrF08_OEFB3CyvrMBqXY,340 +openai/types/fine_tune_list_events_params.py,sha256=aWTdLI8Wq_5yQ_0ulY0vRqltBFM2dMJahYc9XXXwyMg,1636 
+openai/types/fine_tuning/__init__.py,sha256=wfu8ciqHopBRPUkZ0D_NkP14-1wYFgVrY8pbCI2i84s,431 +openai/types/fine_tuning/__pycache__/__init__.cpython-39.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-39.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_event.cpython-39.pyc,, +openai/types/fine_tuning/__pycache__/job_create_params.cpython-39.pyc,, +openai/types/fine_tuning/__pycache__/job_list_events_params.cpython-39.pyc,, +openai/types/fine_tuning/__pycache__/job_list_params.cpython-39.pyc,, +openai/types/fine_tuning/fine_tuning_job.py,sha256=Ehrz8xvUJRi3HXbDSs-8S56gDPpM05pH9Uzcbq55fxA,3270 +openai/types/fine_tuning/fine_tuning_job_event.py,sha256=TRx0UgtvikmYUiRg0vMEu57cNszpin0gsbbrHiPL-jY,341 +openai/types/fine_tuning/job_create_params.py,sha256=ly9Js-QGiQoCoVBVhD7oM-uybkjcPK172ozYI2V6fBM,2578 +openai/types/fine_tuning/job_list_events_params.py,sha256=tfpo_bsTGW2ZaPzv5eJyr5F38UEUiwE_MJDNttfKLwo,367 +openai/types/fine_tuning/job_list_params.py,sha256=Adm5vy-_0qoKP9Ubf_DOoT_pRkyB1YBdbvbAMAP40hw,363 +openai/types/image.py,sha256=HXUIin-8Jb643tEtHYg9YAE59-kqXdycB1p4XqS5qAY,574 +openai/types/image_create_variation_params.py,sha256=e1zOZaTKg37Qe6LgDt0haqHPdOpqj4KYmzGfS-dVX_0,1342 +openai/types/image_edit_params.py,sha256=0y23-ZDZyS0a_a5E2kfVcjH_BJo5XSi0XAnKxwCCQUU,1702 +openai/types/image_generate_params.py,sha256=0XlKCfUmXUi8pQKetb_pri-1qV5xRpU6cjmGynmCFJA,2008 +openai/types/images_response.py,sha256=3eOiPIDxOSkxEaNrb0HOnHZmPtFYWQtY_xpC-yJN6U0,241 +openai/types/model.py,sha256=d_h7gH9_7A2GrRHw61x8RFPEXM3uOlYqh6XVHGuMQ98,499 +openai/types/model_deleted.py,sha256=vDZMiixtF903ce9bGixXpus5J1KrHYGEyu_Eb6kO-m8,195 +openai/types/moderation.py,sha256=xSZnbax_E04ppN4dw9S7Pf8Y6dzIL9-W1uL26fn_tyk,3983 +openai/types/moderation_create_params.py,sha256=VbmYPy75IFhWNeZGV_ocuM_m5w2XdL6ogIDLf0gAwos,921 +openai/types/moderation_create_response.py,sha256=S2IXOkUwnlEWH8hKpMI8nPpg2NEiLYE3OfTXL892xvA,451 +openai/types/shared/__init__.py,sha256=ZElh_qWWutVp2d2fkFakb8sGeqcB6M_GWtC3SYohO8g,202 +openai/types/shared/__pycache__/__init__.cpython-39.pyc,, +openai/types/shared/__pycache__/function_definition.cpython-39.pyc,, +openai/types/shared/__pycache__/function_parameters.cpython-39.pyc,, +openai/types/shared/function_definition.py,sha256=93A5kUZp22ROJJoxgHFFTb6_bQofWd4j-Y9pXwAJmvs,1043 +openai/types/shared/function_parameters.py,sha256=Xj6wM1hq1Kp6ZsYyNdbjfW9LLfZwJFH7HWuzqLnMu-0,152 +openai/types/shared_params/__init__.py,sha256=ZElh_qWWutVp2d2fkFakb8sGeqcB6M_GWtC3SYohO8g,202 +openai/types/shared_params/__pycache__/__init__.cpython-39.pyc,, +openai/types/shared_params/__pycache__/function_definition.cpython-39.pyc,, +openai/types/shared_params/__pycache__/function_parameters.cpython-39.pyc,, +openai/types/shared_params/function_definition.py,sha256=Q9KFN5n68odV_D9V1CYn1UJX6lIjjRNDy4dUULF-FxI,1081 +openai/types/shared_params/function_parameters.py,sha256=_zsz301xdUydlEuHd3igO6B660FXZb1muELvc0Eq_a8,188 +openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62 diff --git a/site-packages/openai-1.0.0rc1.dist-info/WHEEL b/site-packages/openai-1.2.4.dist-info/WHEEL similarity index 100% rename from site-packages/openai-1.0.0rc1.dist-info/WHEEL rename to site-packages/openai-1.2.4.dist-info/WHEEL diff --git a/site-packages/openai-1.0.0rc1.dist-info/entry_points.txt b/site-packages/openai-1.2.4.dist-info/entry_points.txt similarity index 100% rename from site-packages/openai-1.0.0rc1.dist-info/entry_points.txt rename to site-packages/openai-1.2.4.dist-info/entry_points.txt diff 
--git a/site-packages/openai-1.0.0rc1.dist-info/licenses/LICENSE b/site-packages/openai-1.2.4.dist-info/licenses/LICENSE similarity index 100% rename from site-packages/openai-1.0.0rc1.dist-info/licenses/LICENSE rename to site-packages/openai-1.2.4.dist-info/licenses/LICENSE diff --git a/site-packages/openai/.DS_Store b/site-packages/openai/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 None: # type: ignore[reportUnusedFunction] _client = None +from ._module_client import beta as beta from ._module_client import chat as chat from ._module_client import audio as audio from ._module_client import edits as edits diff --git a/site-packages/openai/_base_client.py b/site-packages/openai/_base_client.py index 5ed9f54..3db8b6f 100644 --- a/site-packages/openai/_base_client.py +++ b/site-packages/openai/_base_client.py @@ -1,5 +1,6 @@ from __future__ import annotations +import os import json import time import uuid @@ -60,6 +61,7 @@ from ._types import ( RequestOptions, UnknownResponse, ModelBuilderProtocol, + BinaryResponseContent, ) from ._utils import is_dict, is_given, is_mapping from ._compat import model_copy, model_dump @@ -1318,12 +1320,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]): if retries > 0: return await self._retry_request(options, cast_to, retries, stream=stream, stream_cls=stream_cls) raise APITimeoutError(request=request) from err - except httpx.ReadTimeout as err: - # We explicitly do not retry on ReadTimeout errors as this means - # that the server processing the request has taken 60 seconds - # (our default timeout). This likely indicates that something - # is not working as expected on the server side. 
- raise except httpx.TimeoutException as err: if retries > 0: return await self._retry_request(options, cast_to, retries, stream=stream, stream_cls=stream_cls) @@ -1535,7 +1531,7 @@ def make_request_options( extra_query: Query | None = None, extra_body: Body | None = None, idempotency_key: str | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, post_parser: PostParser | NotGiven = NOT_GIVEN, ) -> RequestOptions: """Create a dict of type RequestOptions without keys of NotGiven values.""" @@ -1672,3 +1668,105 @@ def _merge_mappings( """ merged = {**obj1, **obj2} return {key: value for key, value in merged.items() if not isinstance(value, Omit)} + + +class HttpxBinaryResponseContent(BinaryResponseContent): + response: httpx.Response + + def __init__(self, response: httpx.Response) -> None: + self.response = response + + @property + @override + def content(self) -> bytes: + return self.response.content + + @property + @override + def text(self) -> str: + return self.response.text + + @property + @override + def encoding(self) -> Optional[str]: + return self.response.encoding + + @property + @override + def charset_encoding(self) -> Optional[str]: + return self.response.charset_encoding + + @override + def json(self, **kwargs: Any) -> Any: + return self.response.json(**kwargs) + + @override + def read(self) -> bytes: + return self.response.read() + + @override + def iter_bytes(self, chunk_size: Optional[int] = None) -> Iterator[bytes]: + return self.response.iter_bytes(chunk_size) + + @override + def iter_text(self, chunk_size: Optional[int] = None) -> Iterator[str]: + return self.response.iter_text(chunk_size) + + @override + def iter_lines(self) -> Iterator[str]: + return self.response.iter_lines() + + @override + def iter_raw(self, chunk_size: Optional[int] = None) -> Iterator[bytes]: + return self.response.iter_raw(chunk_size) + + @override + def stream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + with open(file, mode="wb") as f: + for data in self.response.iter_bytes(chunk_size): + f.write(data) + + @override + def close(self) -> None: + return self.response.close() + + @override + async def aread(self) -> bytes: + return await self.response.aread() + + @override + async def aiter_bytes(self, chunk_size: Optional[int] = None) -> AsyncIterator[bytes]: + return self.response.aiter_bytes(chunk_size) + + @override + async def aiter_text(self, chunk_size: Optional[int] = None) -> AsyncIterator[str]: + return self.response.aiter_text(chunk_size) + + @override + async def aiter_lines(self) -> AsyncIterator[str]: + return self.response.aiter_lines() + + @override + async def aiter_raw(self, chunk_size: Optional[int] = None) -> AsyncIterator[bytes]: + return self.response.aiter_raw(chunk_size) + + @override + async def astream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + path = anyio.Path(file) + async with await path.open(mode="wb") as f: + async for data in self.response.aiter_bytes(chunk_size): + await f.write(data) + + @override + async def aclose(self) -> None: + return await self.response.aclose() diff --git a/site-packages/openai/_client.py b/site-packages/openai/_client.py index 9df7eab..7820d5f 100644 --- a/site-packages/openai/_client.py +++ b/site-packages/openai/_client.py @@ -20,7 +20,7 @@ from ._types import ( ProxiesTypes, RequestOptions, ) -from ._utils import is_given +from ._utils import 
is_given, is_mapping from ._version import __version__ from ._streaming import Stream as Stream from ._streaming import AsyncStream as AsyncStream @@ -52,6 +52,7 @@ class OpenAI(SyncAPIClient): models: resources.Models fine_tuning: resources.FineTuning fine_tunes: resources.FineTunes + beta: resources.Beta with_raw_response: OpenAIWithRawResponse # client options @@ -125,6 +126,7 @@ class OpenAI(SyncAPIClient): self.models = resources.Models(self) self.fine_tuning = resources.FineTuning(self) self.fine_tunes = resources.FineTunes(self) + self.beta = resources.Beta(self) self.with_raw_response = OpenAIWithRawResponse(self) @property @@ -219,30 +221,31 @@ class OpenAI(SyncAPIClient): body: object, response: httpx.Response, ) -> APIStatusError: + data = body.get("error", body) if is_mapping(body) else body if response.status_code == 400: - return _exceptions.BadRequestError(err_msg, response=response, body=body) + return _exceptions.BadRequestError(err_msg, response=response, body=data) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return _exceptions.AuthenticationError(err_msg, response=response, body=data) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError(err_msg, response=response, body=data) if response.status_code == 404: - return _exceptions.NotFoundError(err_msg, response=response, body=body) + return _exceptions.NotFoundError(err_msg, response=response, body=data) if response.status_code == 409: - return _exceptions.ConflictError(err_msg, response=response, body=body) + return _exceptions.ConflictError(err_msg, response=response, body=data) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data) if response.status_code == 429: - return _exceptions.RateLimitError(err_msg, response=response, body=body) + return _exceptions.RateLimitError(err_msg, response=response, body=data) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) - return APIStatusError(err_msg, response=response, body=body) + return _exceptions.InternalServerError(err_msg, response=response, body=data) + return APIStatusError(err_msg, response=response, body=data) class AsyncOpenAI(AsyncAPIClient): @@ -257,6 +260,7 @@ class AsyncOpenAI(AsyncAPIClient): models: resources.AsyncModels fine_tuning: resources.AsyncFineTuning fine_tunes: resources.AsyncFineTunes + beta: resources.AsyncBeta with_raw_response: AsyncOpenAIWithRawResponse # client options @@ -330,6 +334,7 @@ class AsyncOpenAI(AsyncAPIClient): self.models = resources.AsyncModels(self) self.fine_tuning = resources.AsyncFineTuning(self) self.fine_tunes = resources.AsyncFineTunes(self) + self.beta = resources.AsyncBeta(self) self.with_raw_response = AsyncOpenAIWithRawResponse(self) @property @@ -427,30 +432,31 @@ class AsyncOpenAI(AsyncAPIClient): body: object, response: httpx.Response, ) -> APIStatusError: + data = body.get("error", body) if is_mapping(body) else body if response.status_code == 400: - return _exceptions.BadRequestError(err_msg, response=response, body=body) + return _exceptions.BadRequestError(err_msg, response=response, body=data) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return 
_exceptions.AuthenticationError(err_msg, response=response, body=data) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError(err_msg, response=response, body=data) if response.status_code == 404: - return _exceptions.NotFoundError(err_msg, response=response, body=body) + return _exceptions.NotFoundError(err_msg, response=response, body=data) if response.status_code == 409: - return _exceptions.ConflictError(err_msg, response=response, body=body) + return _exceptions.ConflictError(err_msg, response=response, body=data) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data) if response.status_code == 429: - return _exceptions.RateLimitError(err_msg, response=response, body=body) + return _exceptions.RateLimitError(err_msg, response=response, body=data) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) - return APIStatusError(err_msg, response=response, body=body) + return _exceptions.InternalServerError(err_msg, response=response, body=data) + return APIStatusError(err_msg, response=response, body=data) class OpenAIWithRawResponse: @@ -466,6 +472,7 @@ class OpenAIWithRawResponse: self.models = resources.ModelsWithRawResponse(client.models) self.fine_tuning = resources.FineTuningWithRawResponse(client.fine_tuning) self.fine_tunes = resources.FineTunesWithRawResponse(client.fine_tunes) + self.beta = resources.BetaWithRawResponse(client.beta) class AsyncOpenAIWithRawResponse: @@ -481,6 +488,7 @@ class AsyncOpenAIWithRawResponse: self.models = resources.AsyncModelsWithRawResponse(client.models) self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning) self.fine_tunes = resources.AsyncFineTunesWithRawResponse(client.fine_tunes) + self.beta = resources.AsyncBetaWithRawResponse(client.beta) Client = OpenAI diff --git a/site-packages/openai/_extras/.DS_Store b/site-packages/openai/_extras/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 None: if not is_file_content(obj): prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" raise RuntimeError( - f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/openai/openai-python/tree/v1#file-uploads" + f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. 
See https://github.com/openai/openai-python/tree/main#file-uploads" ) from None diff --git a/site-packages/openai/_models.py b/site-packages/openai/_models.py index 40245ac..ebaef99 100644 --- a/site-packages/openai/_models.py +++ b/site-packages/openai/_models.py @@ -121,6 +121,7 @@ class BaseModel(pydantic.BaseModel): if PYDANTIC_V2: _extra[key] = value else: + _fields_set.add(key) fields_values[key] = value object.__setattr__(m, "__dict__", fields_values) @@ -313,16 +314,13 @@ def construct_type(*, value: object, type_: type) -> object: return [construct_type(value=entry, type_=inner_type) for entry in value] if origin == float: - try: - return float(cast(Any, value)) - except Exception: - return value + if isinstance(value, int): + coerced = float(value) + if coerced != value: + return value + return coerced - if origin == int: - try: - return int(cast(Any, value)) - except Exception: - return value + return value if type_ == datetime: try: diff --git a/site-packages/openai/_module_client.py b/site-packages/openai/_module_client.py index ca80468..fe8e0a2 100644 --- a/site-packages/openai/_module_client.py +++ b/site-packages/openai/_module_client.py @@ -12,6 +12,12 @@ class ChatProxy(LazyProxy[resources.Chat]): return _load_client().chat +class BetaProxy(LazyProxy[resources.Beta]): + @override + def __load__(self) -> resources.Beta: + return _load_client().beta + + class EditsProxy(LazyProxy[resources.Edits]): @override def __load__(self) -> resources.Edits: @@ -73,6 +79,7 @@ class FineTuningProxy(LazyProxy[resources.FineTuning]): chat: resources.Chat = ChatProxy().__as_proxied__() +beta: resources.Beta = BetaProxy().__as_proxied__() edits: resources.Edits = EditsProxy().__as_proxied__() files: resources.Files = FilesProxy().__as_proxied__() audio: resources.Audio = AudioProxy().__as_proxied__() diff --git a/site-packages/openai/_response.py b/site-packages/openai/_response.py index 1839821..3cc8fd8 100644 --- a/site-packages/openai/_response.py +++ b/site-packages/openai/_response.py @@ -9,7 +9,7 @@ from typing_extensions import Awaitable, ParamSpec, get_args, override, get_orig import httpx import pydantic -from ._types import NoneType, UnknownResponse +from ._types import NoneType, UnknownResponse, BinaryResponseContent from ._utils import is_given from ._models import BaseModel from ._constants import RAW_RESPONSE_HEADER @@ -135,6 +135,9 @@ class APIResponse(Generic[R]): origin = get_origin(cast_to) or cast_to + if inspect.isclass(origin) and issubclass(origin, BinaryResponseContent): + return cast(R, cast_to(response)) # type: ignore + if origin == APIResponse: raise RuntimeError("Unexpected state - cast_to is `APIResponse`") diff --git a/site-packages/openai/_streaming.py b/site-packages/openai/_streaming.py index cee737f..0957466 100644 --- a/site-packages/openai/_streaming.py +++ b/site-packages/openai/_streaming.py @@ -47,8 +47,9 @@ class Stream(Generic[ResponseT]): cast_to = self._cast_to response = self.response process_data = self._client._process_response_data + iterator = self._iter_events() - for sse in self._iter_events(): + for sse in iterator: if sse.data.startswith("[DONE]"): break @@ -63,6 +64,10 @@ class Stream(Generic[ResponseT]): yield process_data(data=data, cast_to=cast_to, response=response) + # Ensure the entire stream is consumed + for sse in iterator: + ... 
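The hunk above binds `self._iter_events()` to a local `iterator` and, after breaking out of the loop at the `[DONE]` sentinel, drains whatever events remain. Abandoning an SSE iterator mid-response can leave unread bytes on the underlying HTTP response; reading it to completion lets the response be closed cleanly. A minimal sketch of the same pattern in plain Python, independent of the library's `Stream` classes; `consume_sse` and its sample data are illustrative only:

def consume_sse(events):
    """Yield event payloads but always read the source to the end.

    `events` is any iterator of SSE data strings; the "[DONE]"
    sentinel and the trailing drain loop mirror the hunk above.
    """
    iterator = iter(events)
    for data in iterator:
        if data.startswith("[DONE]"):
            break
        yield data
    # Ensure the entire stream is consumed even though we stopped
    # yielding at the sentinel, so the underlying HTTP response can
    # be read to completion and closed cleanly.
    for _ in iterator:
        pass


# The trailing "ignored" item is still pulled from the source:
print(list(consume_sse(["a", "b", "[DONE]", "ignored"])))  # -> ['a', 'b']

The async hunk that follows applies the same drain step to the asynchronous stream.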
+ class AsyncStream(Generic[ResponseT]): """Provides the core interface to iterate over an asynchronous stream response.""" @@ -97,8 +102,9 @@ class AsyncStream(Generic[ResponseT]): cast_to = self._cast_to response = self.response process_data = self._client._process_response_data + iterator = self._iter_events() - async for sse in self._iter_events(): + async for sse in iterator: if sse.data.startswith("[DONE]"): break @@ -113,6 +119,10 @@ class AsyncStream(Generic[ResponseT]): yield process_data(data=data, cast_to=cast_to, response=response) + # Ensure the entire stream is consumed + async for sse in iterator: + ... + class ServerSentEvent: def __init__( diff --git a/site-packages/openai/_types.py b/site-packages/openai/_types.py index cab9fc5..0d05be9 100644 --- a/site-packages/openai/_types.py +++ b/site-packages/openai/_types.py @@ -1,6 +1,7 @@ from __future__ import annotations from os import PathLike +from abc import ABC, abstractmethod from typing import ( IO, TYPE_CHECKING, @@ -13,8 +14,10 @@ from typing import ( Mapping, TypeVar, Callable, + Iterator, Optional, Sequence, + AsyncIterator, ) from typing_extensions import ( Literal, @@ -25,7 +28,6 @@ from typing_extensions import ( runtime_checkable, ) -import httpx import pydantic from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport @@ -40,11 +42,170 @@ AnyMapping = Mapping[str, object] ModelT = TypeVar("ModelT", bound=pydantic.BaseModel) _T = TypeVar("_T") + +class BinaryResponseContent(ABC): + def __init__( + self, + response: Any, + ) -> None: + ... + + @property + @abstractmethod + def content(self) -> bytes: + pass + + @property + @abstractmethod + def text(self) -> str: + pass + + @property + @abstractmethod + def encoding(self) -> Optional[str]: + """ + Return an encoding to use for decoding the byte content into text. + The priority for determining this is given by... + + * `.encoding = <>` has been set explicitly. + * The encoding as specified by the charset parameter in the Content-Type header. + * The encoding as determined by `default_encoding`, which may either be + a string like "utf-8" indicating the encoding to use, or may be a callable + which enables charset autodetection. + """ + pass + + @property + @abstractmethod + def charset_encoding(self) -> Optional[str]: + """ + Return the encoding, as specified by the Content-Type header. + """ + pass + + @abstractmethod + def json(self, **kwargs: Any) -> Any: + pass + + @abstractmethod + def read(self) -> bytes: + """ + Read and return the response content. + """ + pass + + @abstractmethod + def iter_bytes(self, chunk_size: Optional[int] = None) -> Iterator[bytes]: + """ + A byte-iterator over the decoded response content. + This allows us to handle gzip, deflate, and brotli encoded responses. + """ + pass + + @abstractmethod + def iter_text(self, chunk_size: Optional[int] = None) -> Iterator[str]: + """ + A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + pass + + @abstractmethod + def iter_lines(self) -> Iterator[str]: + pass + + @abstractmethod + def iter_raw(self, chunk_size: Optional[int] = None) -> Iterator[bytes]: + """ + A byte-iterator over the raw response content. + """ + pass + + @abstractmethod + def stream_to_file( + self, + file: str | PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + """ + Stream the output to the given file. 
+ """ + pass + + @abstractmethod + def close(self) -> None: + """ + Close the response and release the connection. + Automatically called if the response body is read to completion. + """ + pass + + @abstractmethod + async def aread(self) -> bytes: + """ + Read and return the response content. + """ + pass + + @abstractmethod + async def aiter_bytes(self, chunk_size: Optional[int] = None) -> AsyncIterator[bytes]: + """ + A byte-iterator over the decoded response content. + This allows us to handle gzip, deflate, and brotli encoded responses. + """ + pass + + @abstractmethod + async def aiter_text(self, chunk_size: Optional[int] = None) -> AsyncIterator[str]: + """ + A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + pass + + @abstractmethod + async def aiter_lines(self) -> AsyncIterator[str]: + pass + + @abstractmethod + async def aiter_raw(self, chunk_size: Optional[int] = None) -> AsyncIterator[bytes]: + """ + A byte-iterator over the raw response content. + """ + pass + + @abstractmethod + async def astream_to_file( + self, + file: str | PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + """ + Stream the output to the given file. + """ + pass + + @abstractmethod + async def aclose(self) -> None: + """ + Close the response and release the connection. + Automatically called if the response body is read to completion. + """ + pass + + # Approximates httpx internal ProxiesTypes and RequestFiles types # while adding support for `PathLike` instances ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]] ProxiesTypes = Union[str, Proxy, ProxiesDict] -FileContent = Union[IO[bytes], bytes, PathLike] +if TYPE_CHECKING: + FileContent = Union[IO[bytes], bytes, PathLike[str]] +else: + FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8. FileTypes = Union[ # file (or bytes) FileContent, @@ -181,7 +342,7 @@ HeadersLike = Union[Headers, HeadersLikeProtocol] ResponseT = TypeVar( "ResponseT", - bound="Union[str, None, BaseModel, List[Any], Dict[str, Any], httpx.Response, UnknownResponse, ModelBuilderProtocol]", + bound="Union[str, None, BaseModel, List[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol, BinaryResponseContent]", ) StrBytesIntFloat = Union[str, bytes, int, float] diff --git a/site-packages/openai/_utils/.DS_Store b/site-packages/openai/_utils/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 type | None: +def _get_annotated_type(type_: type) -> type | None: """If the given type is an `Annotated` type then it is returned, if not `None` is returned. This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]` @@ -113,7 +115,7 @@ def _maybe_transform_key(key: str, type_: type) -> str: Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata. 
""" - annotated_type = _get_annoted_type(type_) + annotated_type = _get_annotated_type(type_) if annotated_type is None: # no `Annotated` definition for this type, no transformation needed return key @@ -165,11 +167,14 @@ def _transform_recursive( data = _transform_recursive(data, annotation=annotation, inner_type=subtype) return data + if isinstance(data, pydantic.BaseModel): + return model_dump(data, exclude_unset=True) + return _transform_value(data, annotation) def _transform_value(data: object, type_: type) -> object: - annotated_type = _get_annoted_type(type_) + annotated_type = _get_annotated_type(type_) if annotated_type is None: return data diff --git a/site-packages/openai/_version.py b/site-packages/openai/_version.py index 8cdfe78..f22b1aa 100644 --- a/site-packages/openai/_version.py +++ b/site-packages/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. __title__ = "openai" -__version__ = "1.0.0-rc1" +__version__ = "1.2.4" # x-release-please-version diff --git a/site-packages/openai/cli/.DS_Store b/site-packages/openai/cli/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 None: params: CompletionCreateParams = { "model": args.model, - "messages": [{"role": message.role, "content": message.content} for message in args.message], + "messages": [ + {"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message + ], "n": args.n, "temperature": args.temperature, "top_p": args.top_p, diff --git a/site-packages/openai/cli/_api/files.py b/site-packages/openai/cli/_api/files.py index ae6dadf..5f3631b 100644 --- a/site-packages/openai/cli/_api/files.py +++ b/site-packages/openai/cli/_api/files.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, cast from argparse import ArgumentParser from .._utils import get_client, print_model @@ -55,7 +55,12 @@ class CLIFile: with open(args.file, "rb") as file_reader: buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") - file = get_client().files.create(file=(args.file, buffer_reader), purpose=args.purpose) + file = get_client().files.create( + file=(args.file, buffer_reader), + # casts required because the API is typed for enums + # but we don't want to validate that here for forwards-compat + purpose=cast(Any, args.purpose), + ) print_model(file) @staticmethod diff --git a/site-packages/openai/cli/_tools/.DS_Store b/site-packages/openai/cli/_tools/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0=1.0.0 - see the README 
at https://github.com/openai/openai-python for the API. + +You can run `openai migrate` to automatically upgrade your codebase to use the 1.0.0 interface. + +Alternatively, you can pin your installation to the old version, e.g. `pip install openai==0.28` + +A detailed migration guide is available here: https://github.com/openai/openai-python/discussions/742 +""" + + +class APIRemovedInV1(OpenAIError): + def __init__(self, *, symbol: str) -> None: + super().__init__(INSTRUCTIONS.format(symbol=symbol)) + + +class APIRemovedInV1Proxy(LazyProxy[None]): + def __init__(self, *, symbol: str) -> None: + super().__init__() + self._symbol = symbol + + @override + def __load__(self) -> None: + raise APIRemovedInV1(symbol=self._symbol) + + +SYMBOLS = [ + "Edit", + "File", + "Audio", + "Image", + "Model", + "Engine", + "Customer", + "FineTune", + "Embedding", + "Completion", + "Deployment", + "Moderation", + "ErrorObject", + "FineTuningJob", + "ChatCompletion", +] + +# we explicitly tell type checkers that nothing is exported +# from this file so that when we re-export the old symbols +# in `openai/__init__.py` they aren't added to the auto-complete +# suggestions given by editors +if TYPE_CHECKING: + __all__: list[str] = [] +else: + __all__ = SYMBOLS + + +__locals = locals() +for symbol in SYMBOLS: + __locals[symbol] = APIRemovedInV1Proxy(symbol=symbol) diff --git a/site-packages/openai/lib/_validators.py b/site-packages/openai/lib/_validators.py index 8e4ed3c..c8608c0 100644 --- a/site-packages/openai/lib/_validators.py +++ b/site-packages/openai/lib/_validators.py @@ -407,7 +407,7 @@ def completions_space_start_validator(df: pd.DataFrame) -> Remediation: """ def add_space_start(x: Any) -> Any: - x["completion"] = x["completion"].apply(lambda x: ("" if x[0] == " " else " ") + x) + x["completion"] = x["completion"].apply(lambda s: ("" if s.startswith(" ") else " ") + s) return x optional_msg = None diff --git a/site-packages/openai/lib/azure.py b/site-packages/openai/lib/azure.py index f5fcd24..d31313e 100644 --- a/site-packages/openai/lib/azure.py +++ b/site-packages/openai/lib/azure.py @@ -22,6 +22,7 @@ _deployments_endpoints = set( "/embeddings", "/audio/transcriptions", "/audio/translations", + "/images/generations", ] ) diff --git a/site-packages/openai/pagination.py b/site-packages/openai/pagination.py index ff45f39..4ec300f 100644 --- a/site-packages/openai/pagination.py +++ b/site-packages/openai/pagination.py @@ -1,7 +1,7 @@ # File generated from our OpenAPI spec by Stainless. 
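The block above replaces each pre-1.0 module-level symbol (`Completion`, `ChatCompletion`, `Embedding`, ...) with an `APIRemovedInV1Proxy` whose `__load__` raises `APIRemovedInV1`, so legacy call sites fail with the migration instructions instead of a bare `AttributeError`. A stripped-down sketch of that idea without the library's `LazyProxy` machinery; `RemovedInV1Error` and `RemovedSymbol` are hypothetical stand-ins:

class RemovedInV1Error(RuntimeError):
    """Hypothetical stand-in for the APIRemovedInV1 error above."""


class RemovedSymbol:
    """Placeholder object that errors with a migration hint on first use."""

    def __init__(self, symbol: str) -> None:
        self._symbol = symbol

    def _removed(self) -> None:
        raise RemovedInV1Error(
            f"openai.{self._symbol} was removed in openai>=1.0.0; "
            "run `openai migrate` or pin `pip install openai==0.28`."
        )

    # Attribute access and calls both trip the error, mimicking the
    # proxy's __load__ in the patch above.
    def __getattr__(self, name: str):
        self._removed()

    def __call__(self, *args, **kwargs):
        self._removed()


SYMBOLS = ["Completion", "ChatCompletion", "Embedding"]
for _symbol in SYMBOLS:
    globals()[_symbol] = RemovedSymbol(_symbol)

# Completion.create(model=..., prompt=...)  # raises RemovedInV1Error

Keeping the placeholders out of `__all__` when type checking, as the `if TYPE_CHECKING` branch above does, also stops editors from auto-completing the removed names.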
from typing import Any, List, Generic, TypeVar, Optional, cast -from typing_extensions import Protocol, override, runtime_checkable +from typing_extensions import Literal, Protocol, override, runtime_checkable from ._types import ModelT from ._models import BaseModel @@ -21,7 +21,7 @@ class SyncPage(BaseSyncPage[ModelT], BasePage[ModelT], Generic[ModelT]): """Note: no pagination actually occurs yet, this is for forwards-compatibility.""" data: List[ModelT] - object: str + object: Literal["list"] @override def _get_page_items(self) -> List[ModelT]: @@ -40,7 +40,7 @@ class AsyncPage(BaseAsyncPage[ModelT], BasePage[ModelT], Generic[ModelT]): """Note: no pagination actually occurs yet, this is for forwards-compatibility.""" data: List[ModelT] - object: str + object: Literal["list"] @override def _get_page_items(self) -> List[ModelT]: diff --git a/site-packages/openai/resources/.DS_Store b/site-packages/openai/resources/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 None: super().__init__(client) self.transcriptions = Transcriptions(client) self.translations = Translations(client) + self.speech = Speech(client) self.with_raw_response = AudioWithRawResponse(self) class AsyncAudio(AsyncAPIResource): transcriptions: AsyncTranscriptions translations: AsyncTranslations + speech: AsyncSpeech with_raw_response: AsyncAudioWithRawResponse def __init__(self, client: AsyncOpenAI) -> None: super().__init__(client) self.transcriptions = AsyncTranscriptions(client) self.translations = AsyncTranslations(client) + self.speech = AsyncSpeech(client) self.with_raw_response = AsyncAudioWithRawResponse(self) @@ -52,9 +62,11 @@ class AudioWithRawResponse: def __init__(self, audio: Audio) -> None: self.transcriptions = TranscriptionsWithRawResponse(audio.transcriptions) self.translations = TranslationsWithRawResponse(audio.translations) + self.speech = SpeechWithRawResponse(audio.speech) class AsyncAudioWithRawResponse: def __init__(self, audio: AsyncAudio) -> None: self.transcriptions = AsyncTranscriptionsWithRawResponse(audio.transcriptions) self.translations = AsyncTranslationsWithRawResponse(audio.translations) + self.speech = AsyncSpeechWithRawResponse(audio.speech) diff --git a/site-packages/openai/resources/audio/speech.py b/site-packages/openai/resources/audio/speech.py new file mode 100644 index 0000000..4588438 --- /dev/null +++ b/site-packages/openai/resources/audio/speech.py @@ -0,0 +1,168 @@ +# File generated from our OpenAPI spec by Stainless. 
+ +from __future__ import annotations + +from typing import TYPE_CHECKING, Union +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from ...types.audio import speech_create_params +from ..._base_client import HttpxBinaryResponseContent, make_request_options + +if TYPE_CHECKING: + from ..._client import OpenAI, AsyncOpenAI + +__all__ = ["Speech", "AsyncSpeech"] + + +class Speech(SyncAPIResource): + with_raw_response: SpeechWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.with_raw_response = SpeechWithRawResponse(self) + + def create( + self, + *, + input: str, + model: Union[str, Literal["tts-1", "tts-1-hd"]], + voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], + response_format: Literal["mp3", "opus", "aac", "flac"] | NotGiven = NOT_GIVEN, + speed: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> HttpxBinaryResponseContent: + """ + Generates audio from the input text. + + Args: + input: The text to generate audio for. The maximum length is 4096 characters. + + model: + One of the available [TTS models](https://platform.openai.com/docs/models/tts): + `tts-1` or `tts-1-hd` + + voice: The voice to use when generating the audio. Supported voices are `alloy`, + `echo`, `fable`, `onyx`, `nova`, and `shimmer`. + + response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + + speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + the default. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/audio/speech", + body=maybe_transform( + { + "input": input, + "model": model, + "voice": voice, + "response_format": response_format, + "speed": speed, + }, + speech_create_params.SpeechCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=HttpxBinaryResponseContent, + ) + + +class AsyncSpeech(AsyncAPIResource): + with_raw_response: AsyncSpeechWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.with_raw_response = AsyncSpeechWithRawResponse(self) + + async def create( + self, + *, + input: str, + model: Union[str, Literal["tts-1", "tts-1-hd"]], + voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], + response_format: Literal["mp3", "opus", "aac", "flac"] | NotGiven = NOT_GIVEN, + speed: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> HttpxBinaryResponseContent: + """ + Generates audio from the input text. + + Args: + input: The text to generate audio for. The maximum length is 4096 characters. + + model: + One of the available [TTS models](https://platform.openai.com/docs/models/tts): + `tts-1` or `tts-1-hd` + + voice: The voice to use when generating the audio. Supported voices are `alloy`, + `echo`, `fable`, `onyx`, `nova`, and `shimmer`. + + response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + + speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + the default. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/audio/speech", + body=maybe_transform( + { + "input": input, + "model": model, + "voice": voice, + "response_format": response_format, + "speed": speed, + }, + speech_create_params.SpeechCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=HttpxBinaryResponseContent, + ) + + +class SpeechWithRawResponse: + def __init__(self, speech: Speech) -> None: + self.create = to_raw_response_wrapper( + speech.create, + ) + + +class AsyncSpeechWithRawResponse: + def __init__(self, speech: AsyncSpeech) -> None: + self.create = async_to_raw_response_wrapper( + speech.create, + ) diff --git a/site-packages/openai/resources/audio/transcriptions.py b/site-packages/openai/resources/audio/transcriptions.py index ca61f8b..d2b4452 100644 --- a/site-packages/openai/resources/audio/transcriptions.py +++ b/site-packages/openai/resources/audio/transcriptions.py @@ -5,6 +5,8 @@ from __future__ import annotations from typing import TYPE_CHECKING, Union, Mapping, cast from typing_extensions import Literal +import httpx + from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import extract_files, maybe_transform, deepcopy_minimal from ..._resource import SyncAPIResource, AsyncAPIResource @@ -39,7 +41,7 @@ class Transcriptions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Transcription: """ Transcribes audio into the input language. @@ -60,8 +62,8 @@ class Transcriptions(SyncAPIResource): [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: The format of the transcript output, in one of these options: json, text, srt, - verbose_json, or vtt. + response_format: The format of the transcript output, in one of these options: `json`, `text`, + `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -126,7 +128,7 @@ class AsyncTranscriptions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Transcription: """ Transcribes audio into the input language. @@ -147,8 +149,8 @@ class AsyncTranscriptions(AsyncAPIResource): [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: The format of the transcript output, in one of these options: json, text, srt, - verbose_json, or vtt. + response_format: The format of the transcript output, in one of these options: `json`, `text`, + `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and diff --git a/site-packages/openai/resources/audio/translations.py b/site-packages/openai/resources/audio/translations.py index 0b499b9..fe7f7f2 100644 --- a/site-packages/openai/resources/audio/translations.py +++ b/site-packages/openai/resources/audio/translations.py @@ -5,6 +5,8 @@ from __future__ import annotations from typing import TYPE_CHECKING, Union, Mapping, cast from typing_extensions import Literal +import httpx + from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import extract_files, maybe_transform, deepcopy_minimal from ..._resource import SyncAPIResource, AsyncAPIResource @@ -38,7 +40,7 @@ class Translations(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Translation: """ Translates audio into English. @@ -54,8 +56,8 @@ class Translations(SyncAPIResource): [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should be in English. - response_format: The format of the transcript output, in one of these options: json, text, srt, - verbose_json, or vtt. + response_format: The format of the transcript output, in one of these options: `json`, `text`, + `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -118,7 +120,7 @@ class AsyncTranslations(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Translation: """ Translates audio into English. @@ -134,8 +136,8 @@ class AsyncTranslations(AsyncAPIResource): [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should be in English. - response_format: The format of the transcript output, in one of these options: json, text, srt, - verbose_json, or vtt. + response_format: The format of the transcript output, in one of these options: `json`, `text`, + `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and diff --git a/site-packages/openai/resources/beta/__init__.py b/site-packages/openai/resources/beta/__init__.py new file mode 100644 index 0000000..55ad243 --- /dev/null +++ b/site-packages/openai/resources/beta/__init__.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. + +from .beta import Beta, AsyncBeta, BetaWithRawResponse, AsyncBetaWithRawResponse +from .threads import ( + Threads, + AsyncThreads, + ThreadsWithRawResponse, + AsyncThreadsWithRawResponse, +) +from .assistants import ( + Assistants, + AsyncAssistants, + AssistantsWithRawResponse, + AsyncAssistantsWithRawResponse, +) + +__all__ = [ + "Assistants", + "AsyncAssistants", + "AssistantsWithRawResponse", + "AsyncAssistantsWithRawResponse", + "Threads", + "AsyncThreads", + "ThreadsWithRawResponse", + "AsyncThreadsWithRawResponse", + "Beta", + "AsyncBeta", + "BetaWithRawResponse", + "AsyncBetaWithRawResponse", +] diff --git a/site-packages/openai/resources/beta/assistants/__init__.py b/site-packages/openai/resources/beta/assistants/__init__.py new file mode 100644 index 0000000..6efb0b2 --- /dev/null +++ b/site-packages/openai/resources/beta/assistants/__init__.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. + +from .files import Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse +from .assistants import ( + Assistants, + AsyncAssistants, + AssistantsWithRawResponse, + AsyncAssistantsWithRawResponse, +) + +__all__ = [ + "Files", + "AsyncFiles", + "FilesWithRawResponse", + "AsyncFilesWithRawResponse", + "Assistants", + "AsyncAssistants", + "AssistantsWithRawResponse", + "AsyncAssistantsWithRawResponse", +] diff --git a/site-packages/openai/resources/beta/assistants/assistants.py b/site-packages/openai/resources/beta/assistants/assistants.py new file mode 100644 index 0000000..efa711e --- /dev/null +++ b/site-packages/openai/resources/beta/assistants/assistants.py @@ -0,0 +1,656 @@ +# File generated from our OpenAPI spec by Stainless. 
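The new speech resource added above posts to `/audio/speech` with `cast_to=HttpxBinaryResponseContent`, so the result is the binary adapter from `_base_client.py` rather than a parsed model. A hedged usage sketch of that surface as wired in this patch; the API key handling, output filename, and sample text are assumptions, while the `model`, `voice`, and `response_format` values are among the literals the method accepts:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

response = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="The quick brown fox jumped over the lazy dog.",
    response_format="mp3",
)

# `response` is the HttpxBinaryResponseContent adapter; stream_to_file
# writes the iter_bytes chunks to the given path.
response.stream_to_file("speech.mp3")

`stream_to_file` simply writes `iter_bytes` chunks to the given path, per the adapter defined earlier in this patch.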
+ +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Optional +from typing_extensions import Literal + +import httpx + +from .files import Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from ....pagination import SyncCursorPage, AsyncCursorPage +from ....types.beta import ( + Assistant, + AssistantDeleted, + assistant_list_params, + assistant_create_params, + assistant_update_params, +) +from ...._base_client import AsyncPaginator, make_request_options + +if TYPE_CHECKING: + from ...._client import OpenAI, AsyncOpenAI + +__all__ = ["Assistants", "AsyncAssistants"] + + +class Assistants(SyncAPIResource): + files: Files + with_raw_response: AssistantsWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.files = Files(client) + self.with_raw_response = AssistantsWithRawResponse(self) + + def create( + self, + *, + model: str, + description: Optional[str] | NotGiven = NOT_GIVEN, + file_ids: List[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + tools: List[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Assistant: + """ + Create an assistant with a model and instructions. + + Args: + model: ID of the model to use. You can use the + [List models](https://platform.openai.com/docs/api-reference/models/list) API to + see all of your available models, or see our + [Model overview](https://platform.openai.com/docs/models/overview) for + descriptions of them. + + description: The description of the assistant. The maximum length is 512 characters. + + file_ids: A list of [file](https://platform.openai.com/docs/api-reference/files) IDs + attached to this assistant. There can be a maximum of 20 files attached to the + assistant. Files are ordered by their creation date in ascending order. + + instructions: The system instructions that the assistant uses. The maximum length is 32768 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + name: The name of the assistant. The maximum length is 256 characters. + + tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + "/assistants", + body=maybe_transform( + { + "model": model, + "description": description, + "file_ids": file_ids, + "instructions": instructions, + "metadata": metadata, + "name": name, + "tools": tools, + }, + assistant_create_params.AssistantCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Assistant, + ) + + def retrieve( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Assistant: + """ + Retrieves an assistant. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Assistant, + ) + + def update( + self, + assistant_id: str, + *, + description: Optional[str] | NotGiven = NOT_GIVEN, + file_ids: List[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: str | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + tools: List[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Assistant: + """Modifies an assistant. + + Args: + description: The description of the assistant. + + The maximum length is 512 characters. + + file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs + attached to this assistant. There can be a maximum of 20 files attached to the + assistant. Files are ordered by their creation date in ascending order. If a + file was previosuly attached to the list but does not show up in the list, it + will be deleted from the assistant. + + instructions: The system instructions that the assistant uses. The maximum length is 32768 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. 
+ + model: ID of the model to use. You can use the + [List models](https://platform.openai.com/docs/api-reference/models/list) API to + see all of your available models, or see our + [Model overview](https://platform.openai.com/docs/models/overview) for + descriptions of them. + + name: The name of the assistant. The maximum length is 256 characters. + + tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/assistants/{assistant_id}", + body=maybe_transform( + { + "description": description, + "file_ids": file_ids, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "tools": tools, + }, + assistant_update_params.AssistantUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Assistant, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[Assistant]: + """Returns a list of assistants. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + "/assistants", + page=SyncCursorPage[Assistant], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + assistant_list_params.AssistantListParams, + ), + ), + model=Assistant, + ) + + def delete( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantDeleted: + """ + Delete an assistant. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._delete( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantDeleted, + ) + + +class AsyncAssistants(AsyncAPIResource): + files: AsyncFiles + with_raw_response: AsyncAssistantsWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.files = AsyncFiles(client) + self.with_raw_response = AsyncAssistantsWithRawResponse(self) + + async def create( + self, + *, + model: str, + description: Optional[str] | NotGiven = NOT_GIVEN, + file_ids: List[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + tools: List[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Assistant: + """ + Create an assistant with a model and instructions. + + Args: + model: ID of the model to use. You can use the + [List models](https://platform.openai.com/docs/api-reference/models/list) API to + see all of your available models, or see our + [Model overview](https://platform.openai.com/docs/models/overview) for + descriptions of them. + + description: The description of the assistant. The maximum length is 512 characters. + + file_ids: A list of [file](https://platform.openai.com/docs/api-reference/files) IDs + attached to this assistant. There can be a maximum of 20 files attached to the + assistant. Files are ordered by their creation date in ascending order. 
+ + instructions: The system instructions that the assistant uses. The maximum length is 32768 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + name: The name of the assistant. The maximum length is 256 characters. + + tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + "/assistants", + body=maybe_transform( + { + "model": model, + "description": description, + "file_ids": file_ids, + "instructions": instructions, + "metadata": metadata, + "name": name, + "tools": tools, + }, + assistant_create_params.AssistantCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Assistant, + ) + + async def retrieve( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Assistant: + """ + Retrieves an assistant. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._get( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Assistant, + ) + + async def update( + self, + assistant_id: str, + *, + description: Optional[str] | NotGiven = NOT_GIVEN, + file_ids: List[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: str | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + tools: List[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Assistant: + """Modifies an assistant. + + Args: + description: The description of the assistant. + + The maximum length is 512 characters. + + file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs + attached to this assistant. 
There can be a maximum of 20 files attached to the + assistant. Files are ordered by their creation date in ascending order. If a + file was previosuly attached to the list but does not show up in the list, it + will be deleted from the assistant. + + instructions: The system instructions that the assistant uses. The maximum length is 32768 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + model: ID of the model to use. You can use the + [List models](https://platform.openai.com/docs/api-reference/models/list) API to + see all of your available models, or see our + [Model overview](https://platform.openai.com/docs/models/overview) for + descriptions of them. + + name: The name of the assistant. The maximum length is 256 characters. + + tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/assistants/{assistant_id}", + body=maybe_transform( + { + "description": description, + "file_ids": file_ids, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "tools": tools, + }, + assistant_update_params.AssistantUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Assistant, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[Assistant, AsyncCursorPage[Assistant]]: + """Returns a list of assistants. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + "/assistants", + page=AsyncCursorPage[Assistant], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + assistant_list_params.AssistantListParams, + ), + ), + model=Assistant, + ) + + async def delete( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantDeleted: + """ + Delete an assistant. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._delete( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantDeleted, + ) + + +class AssistantsWithRawResponse: + def __init__(self, assistants: Assistants) -> None: + self.files = FilesWithRawResponse(assistants.files) + + self.create = to_raw_response_wrapper( + assistants.create, + ) + self.retrieve = to_raw_response_wrapper( + assistants.retrieve, + ) + self.update = to_raw_response_wrapper( + assistants.update, + ) + self.list = to_raw_response_wrapper( + assistants.list, + ) + self.delete = to_raw_response_wrapper( + assistants.delete, + ) + + +class AsyncAssistantsWithRawResponse: + def __init__(self, assistants: AsyncAssistants) -> None: + self.files = AsyncFilesWithRawResponse(assistants.files) + + self.create = async_to_raw_response_wrapper( + assistants.create, + ) + self.retrieve = async_to_raw_response_wrapper( + assistants.retrieve, + ) + self.update = async_to_raw_response_wrapper( + assistants.update, + ) + self.list = async_to_raw_response_wrapper( + assistants.list, + ) + self.delete = async_to_raw_response_wrapper( + assistants.delete, + ) diff --git a/site-packages/openai/resources/beta/assistants/files.py b/site-packages/openai/resources/beta/assistants/files.py new file mode 100644 index 0000000..5ac5897 --- /dev/null +++ b/site-packages/openai/resources/beta/assistants/files.py @@ -0,0 +1,416 @@ +# File generated from our OpenAPI spec by Stainless. 
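For reference, a minimal usage sketch of the Assistants resource vendored above, written against the client.beta.assistants namespace that beta.py (later in this patch) wires onto the client. The model name, assistant fields, and the API key taken from the environment are assumptions for illustration, not part of the vendored code.

# Sketch only: assumes OPENAI_API_KEY is set and the model name is available.
from openai import OpenAI

client = OpenAI()

# create() maps to the POST /assistants call shown above; the
# "OpenAI-Beta: assistants=v1" header is injected by the resource method.
assistant = client.beta.assistants.create(
    model="gpt-4-1106-preview",
    name="Math Tutor",
    instructions="You answer math questions step by step.",
    tools=[{"type": "code_interpreter"}],
)

fetched = client.beta.assistants.retrieve(assistant.id)   # GET /assistants/{id}
client.beta.assistants.update(assistant.id, name="Math Tutor v2")  # POST /assistants/{id}
client.beta.assistants.delete(assistant.id)                # DELETE /assistants/{id}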
+ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing_extensions import Literal + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from ....pagination import SyncCursorPage, AsyncCursorPage +from ...._base_client import AsyncPaginator, make_request_options +from ....types.beta.assistants import ( + AssistantFile, + FileDeleteResponse, + file_list_params, + file_create_params, +) + +if TYPE_CHECKING: + from ...._client import OpenAI, AsyncOpenAI + +__all__ = ["Files", "AsyncFiles"] + + +class Files(SyncAPIResource): + with_raw_response: FilesWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.with_raw_response = FilesWithRawResponse(self) + + def create( + self, + assistant_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantFile: + """ + Create an assistant file by attaching a + [File](https://platform.openai.com/docs/api-reference/files) to an + [assistant](https://platform.openai.com/docs/api-reference/assistants). + + Args: + file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID (with + `purpose="assistants"`) that the assistant should use. Useful for tools like + `retrieval` and `code_interpreter` that can access files. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/assistants/{assistant_id}/files", + body=maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantFile, + ) + + def retrieve( + self, + file_id: str, + *, + assistant_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantFile: + """ + Retrieves an AssistantFile. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get( + f"/assistants/{assistant_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantFile, + ) + + def list( + self, + assistant_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[AssistantFile]: + """ + Returns a list of assistant files. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/assistants/{assistant_id}/files", + page=SyncCursorPage[AssistantFile], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + model=AssistantFile, + ) + + def delete( + self, + file_id: str, + *, + assistant_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """ + Delete an assistant file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._delete( + f"/assistants/{assistant_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + +class AsyncFiles(AsyncAPIResource): + with_raw_response: AsyncFilesWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.with_raw_response = AsyncFilesWithRawResponse(self) + + async def create( + self, + assistant_id: str, + *, + file_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantFile: + """ + Create an assistant file by attaching a + [File](https://platform.openai.com/docs/api-reference/files) to an + [assistant](https://platform.openai.com/docs/api-reference/assistants). + + Args: + file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID (with + `purpose="assistants"`) that the assistant should use. Useful for tools like + `retrieval` and `code_interpreter` that can access files. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/assistants/{assistant_id}/files", + body=maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantFile, + ) + + async def retrieve( + self, + file_id: str, + *, + assistant_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantFile: + """ + Retrieves an AssistantFile. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._get( + f"/assistants/{assistant_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantFile, + ) + + def list( + self, + assistant_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[AssistantFile, AsyncCursorPage[AssistantFile]]: + """ + Returns a list of assistant files. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/assistants/{assistant_id}/files", + page=AsyncCursorPage[AssistantFile], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + model=AssistantFile, + ) + + async def delete( + self, + file_id: str, + *, + assistant_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """ + Delete an assistant file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._delete( + f"/assistants/{assistant_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + +class FilesWithRawResponse: + def __init__(self, files: Files) -> None: + self.create = to_raw_response_wrapper( + files.create, + ) + self.retrieve = to_raw_response_wrapper( + files.retrieve, + ) + self.list = to_raw_response_wrapper( + files.list, + ) + self.delete = to_raw_response_wrapper( + files.delete, + ) + + +class AsyncFilesWithRawResponse: + def __init__(self, files: AsyncFiles) -> None: + self.create = async_to_raw_response_wrapper( + files.create, + ) + self.retrieve = async_to_raw_response_wrapper( + files.retrieve, + ) + self.list = async_to_raw_response_wrapper( + files.list, + ) + self.delete = async_to_raw_response_wrapper( + files.delete, + ) diff --git a/site-packages/openai/resources/beta/beta.py b/site-packages/openai/resources/beta/beta.py new file mode 100644 index 0000000..b552561 --- /dev/null +++ b/site-packages/openai/resources/beta/beta.py @@ -0,0 +1,60 @@ +# File generated from our OpenAPI spec by Stainless. + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .threads import ( + Threads, + AsyncThreads, + ThreadsWithRawResponse, + AsyncThreadsWithRawResponse, +) +from .assistants import ( + Assistants, + AsyncAssistants, + AssistantsWithRawResponse, + AsyncAssistantsWithRawResponse, +) +from ..._resource import SyncAPIResource, AsyncAPIResource + +if TYPE_CHECKING: + from ..._client import OpenAI, AsyncOpenAI + +__all__ = ["Beta", "AsyncBeta"] + + +class Beta(SyncAPIResource): + assistants: Assistants + threads: Threads + with_raw_response: BetaWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.assistants = Assistants(client) + self.threads = Threads(client) + self.with_raw_response = BetaWithRawResponse(self) + + +class AsyncBeta(AsyncAPIResource): + assistants: AsyncAssistants + threads: AsyncThreads + with_raw_response: AsyncBetaWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.assistants = AsyncAssistants(client) + self.threads = AsyncThreads(client) + self.with_raw_response = AsyncBetaWithRawResponse(self) + + +class BetaWithRawResponse: + def __init__(self, beta: Beta) -> None: + self.assistants = AssistantsWithRawResponse(beta.assistants) + self.threads = ThreadsWithRawResponse(beta.threads) + + +class AsyncBetaWithRawResponse: + def __init__(self, beta: AsyncBeta) -> None: + self.assistants = AsyncAssistantsWithRawResponse(beta.assistants) + self.threads = AsyncThreadsWithRawResponse(beta.threads) diff --git a/site-packages/openai/resources/beta/threads/__init__.py b/site-packages/openai/resources/beta/threads/__init__.py new file mode 100644 index 0000000..b9aaada --- /dev/null +++ b/site-packages/openai/resources/beta/threads/__init__.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. 
+ +from .runs import Runs, AsyncRuns, RunsWithRawResponse, AsyncRunsWithRawResponse +from .threads import ( + Threads, + AsyncThreads, + ThreadsWithRawResponse, + AsyncThreadsWithRawResponse, +) +from .messages import ( + Messages, + AsyncMessages, + MessagesWithRawResponse, + AsyncMessagesWithRawResponse, +) + +__all__ = [ + "Runs", + "AsyncRuns", + "RunsWithRawResponse", + "AsyncRunsWithRawResponse", + "Messages", + "AsyncMessages", + "MessagesWithRawResponse", + "AsyncMessagesWithRawResponse", + "Threads", + "AsyncThreads", + "ThreadsWithRawResponse", + "AsyncThreadsWithRawResponse", +] diff --git a/site-packages/openai/resources/beta/threads/messages/__init__.py b/site-packages/openai/resources/beta/threads/messages/__init__.py new file mode 100644 index 0000000..d8d4ce4 --- /dev/null +++ b/site-packages/openai/resources/beta/threads/messages/__init__.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. + +from .files import Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse +from .messages import ( + Messages, + AsyncMessages, + MessagesWithRawResponse, + AsyncMessagesWithRawResponse, +) + +__all__ = [ + "Files", + "AsyncFiles", + "FilesWithRawResponse", + "AsyncFilesWithRawResponse", + "Messages", + "AsyncMessages", + "MessagesWithRawResponse", + "AsyncMessagesWithRawResponse", +] diff --git a/site-packages/openai/resources/beta/threads/messages/files.py b/site-packages/openai/resources/beta/threads/messages/files.py new file mode 100644 index 0000000..e028a6f --- /dev/null +++ b/site-packages/openai/resources/beta/threads/messages/files.py @@ -0,0 +1,259 @@ +# File generated from our OpenAPI spec by Stainless. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing_extensions import Literal + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from .....pagination import SyncCursorPage, AsyncCursorPage +from ....._base_client import AsyncPaginator, make_request_options +from .....types.beta.threads.messages import MessageFile, file_list_params + +if TYPE_CHECKING: + from ....._client import OpenAI, AsyncOpenAI + +__all__ = ["Files", "AsyncFiles"] + + +class Files(SyncAPIResource): + with_raw_response: FilesWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.with_raw_response = FilesWithRawResponse(self) + + def retrieve( + self, + file_id: str, + *, + thread_id: str, + message_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageFile: + """ + Retrieves a message file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get( + f"/threads/{thread_id}/messages/{message_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageFile, + ) + + def list( + self, + message_id: str, + *, + thread_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[MessageFile]: + """Returns a list of message files. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/messages/{message_id}/files", + page=SyncCursorPage[MessageFile], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + model=MessageFile, + ) + + +class AsyncFiles(AsyncAPIResource): + with_raw_response: AsyncFilesWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.with_raw_response = AsyncFilesWithRawResponse(self) + + async def retrieve( + self, + file_id: str, + *, + thread_id: str, + message_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageFile: + """ + Retrieves a message file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._get( + f"/threads/{thread_id}/messages/{message_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageFile, + ) + + def list( + self, + message_id: str, + *, + thread_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[MessageFile, AsyncCursorPage[MessageFile]]: + """Returns a list of message files. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/messages/{message_id}/files", + page=AsyncCursorPage[MessageFile], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + model=MessageFile, + ) + + +class FilesWithRawResponse: + def __init__(self, files: Files) -> None: + self.retrieve = to_raw_response_wrapper( + files.retrieve, + ) + self.list = to_raw_response_wrapper( + files.list, + ) + + +class AsyncFilesWithRawResponse: + def __init__(self, files: AsyncFiles) -> None: + self.retrieve = async_to_raw_response_wrapper( + files.retrieve, + ) + self.list = async_to_raw_response_wrapper( + files.list, + ) diff --git a/site-packages/openai/resources/beta/threads/messages/messages.py b/site-packages/openai/resources/beta/threads/messages/messages.py new file mode 100644 index 0000000..30ae072 --- /dev/null +++ b/site-packages/openai/resources/beta/threads/messages/messages.py @@ -0,0 +1,479 @@ +# File generated from our OpenAPI spec by Stainless. + +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Optional +from typing_extensions import Literal + +import httpx + +from .files import Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from .....pagination import SyncCursorPage, AsyncCursorPage +from ....._base_client import AsyncPaginator, make_request_options +from .....types.beta.threads import ( + ThreadMessage, + message_list_params, + message_create_params, + message_update_params, +) + +if TYPE_CHECKING: + from ....._client import OpenAI, AsyncOpenAI + +__all__ = ["Messages", "AsyncMessages"] + + +class Messages(SyncAPIResource): + files: Files + with_raw_response: MessagesWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.files = Files(client) + self.with_raw_response = MessagesWithRawResponse(self) + + def create( + self, + thread_id: str, + *, + content: str, + role: Literal["user"], + file_ids: List[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadMessage: + """ + Create a message. + + Args: + content: The content of the message. + + role: The role of the entity that is creating the message. Currently only `user` is + supported. + + file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + the message should use. 
There can be a maximum of 10 files attached to a + message. Useful for tools like `retrieval` and `code_interpreter` that can + access and use files. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}/messages", + body=maybe_transform( + { + "content": content, + "role": role, + "file_ids": file_ids, + "metadata": metadata, + }, + message_create_params.MessageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadMessage, + ) + + def retrieve( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadMessage: + """ + Retrieve a message. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadMessage, + ) + + def update( + self, + message_id: str, + *, + thread_id: str, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadMessage: + """ + Modifies a message. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}/messages/{message_id}", + body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadMessage, + ) + + def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[ThreadMessage]: + """ + Returns a list of messages for a given thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/messages", + page=SyncCursorPage[ThreadMessage], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + message_list_params.MessageListParams, + ), + ), + model=ThreadMessage, + ) + + +class AsyncMessages(AsyncAPIResource): + files: AsyncFiles + with_raw_response: AsyncMessagesWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.files = AsyncFiles(client) + self.with_raw_response = AsyncMessagesWithRawResponse(self) + + async def create( + self, + thread_id: str, + *, + content: str, + role: Literal["user"], + file_ids: List[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadMessage: + """ + Create a message. + + Args: + content: The content of the message. + + role: The role of the entity that is creating the message. Currently only `user` is + supported. + + file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + the message should use. There can be a maximum of 10 files attached to a + message. Useful for tools like `retrieval` and `code_interpreter` that can + access and use files. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/messages", + body=maybe_transform( + { + "content": content, + "role": role, + "file_ids": file_ids, + "metadata": metadata, + }, + message_create_params.MessageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadMessage, + ) + + async def retrieve( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadMessage: + """ + Retrieve a message. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._get( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadMessage, + ) + + async def update( + self, + message_id: str, + *, + thread_id: str, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadMessage: + """ + Modifies a message. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/messages/{message_id}", + body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadMessage, + ) + + def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[ThreadMessage, AsyncCursorPage[ThreadMessage]]: + """ + Returns a list of messages for a given thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/messages", + page=AsyncCursorPage[ThreadMessage], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + message_list_params.MessageListParams, + ), + ), + model=ThreadMessage, + ) + + +class MessagesWithRawResponse: + def __init__(self, messages: Messages) -> None: + self.files = FilesWithRawResponse(messages.files) + + self.create = to_raw_response_wrapper( + messages.create, + ) + self.retrieve = to_raw_response_wrapper( + messages.retrieve, + ) + self.update = to_raw_response_wrapper( + messages.update, + ) + self.list = to_raw_response_wrapper( + messages.list, + ) + + +class AsyncMessagesWithRawResponse: + def __init__(self, messages: AsyncMessages) -> None: + self.files = AsyncFilesWithRawResponse(messages.files) + + self.create = async_to_raw_response_wrapper( + messages.create, + ) + self.retrieve = async_to_raw_response_wrapper( + messages.retrieve, + ) + self.update = async_to_raw_response_wrapper( + messages.update, + ) + self.list = async_to_raw_response_wrapper( + messages.list, + ) diff --git a/site-packages/openai/resources/beta/threads/runs/__init__.py b/site-packages/openai/resources/beta/threads/runs/__init__.py new file mode 100644 index 0000000..6b61813 --- /dev/null +++ b/site-packages/openai/resources/beta/threads/runs/__init__.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. + +from .runs import Runs, AsyncRuns, RunsWithRawResponse, AsyncRunsWithRawResponse +from .steps import Steps, AsyncSteps, StepsWithRawResponse, AsyncStepsWithRawResponse + +__all__ = [ + "Steps", + "AsyncSteps", + "StepsWithRawResponse", + "AsyncStepsWithRawResponse", + "Runs", + "AsyncRuns", + "RunsWithRawResponse", + "AsyncRunsWithRawResponse", +] diff --git a/site-packages/openai/resources/beta/threads/runs/runs.py b/site-packages/openai/resources/beta/threads/runs/runs.py new file mode 100644 index 0000000..969bfab --- /dev/null +++ b/site-packages/openai/resources/beta/threads/runs/runs.py @@ -0,0 +1,656 @@ +# File generated from our OpenAPI spec by Stainless. 
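A sketch of how the Messages resource above might be exercised. The thread ID is a placeholder, and the client.beta.threads wiring comes from the Threads resource elsewhere in this patch.

# Sketch only: "thread_abc123" is a placeholder for an existing thread ID.
from openai import OpenAI

client = OpenAI()

# Create a user message on a thread (POST /threads/{thread_id}/messages).
message = client.beta.threads.messages.create(
    "thread_abc123",
    role="user",
    content="Summarize the attached report.",
)

# List messages with the cursor parameters documented above.
page = client.beta.threads.messages.list("thread_abc123", limit=20, order="desc")
for msg in page:
    print(msg.id, msg.role)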
+ +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Optional +from typing_extensions import Literal + +import httpx + +from .steps import Steps, AsyncSteps, StepsWithRawResponse, AsyncStepsWithRawResponse +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from .....pagination import SyncCursorPage, AsyncCursorPage +from ....._base_client import AsyncPaginator, make_request_options +from .....types.beta.threads import ( + Run, + run_list_params, + run_create_params, + run_update_params, + run_submit_tool_outputs_params, +) + +if TYPE_CHECKING: + from ....._client import OpenAI, AsyncOpenAI + +__all__ = ["Runs", "AsyncRuns"] + + +class Runs(SyncAPIResource): + steps: Steps + with_raw_response: RunsWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.steps = Steps(client) + self.with_raw_response = RunsWithRawResponse(self) + + def create( + self, + thread_id: str, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: Optional[str] | NotGiven = NOT_GIVEN, + tools: Optional[List[run_create_params.Tool]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Create a run. + + Args: + assistant_id: The ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + execute this run. + + instructions: Override the default system message of the assistant. This is useful for + modifying the behavior on a per-run basis. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. + + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}/runs", + body=maybe_transform( + { + "assistant_id": assistant_id, + "instructions": instructions, + "metadata": metadata, + "model": model, + "tools": tools, + }, + run_create_params.RunCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + def retrieve( + self, + run_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Retrieves a run. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get( + f"/threads/{thread_id}/runs/{run_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + def update( + self, + run_id: str, + *, + thread_id: str, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Modifies a run. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}/runs/{run_id}", + body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[Run]: + """ + Returns a list of runs belonging to a thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/runs", + page=SyncCursorPage[Run], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + run_list_params.RunListParams, + ), + ), + model=Run, + ) + + def cancel( + self, + run_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Cancels a run that is `in_progress`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}/runs/{run_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + def submit_tool_outputs( + self, + run_id: str, + *, + thread_id: str, + tool_outputs: List[run_submit_tool_outputs_params.ToolOutput], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + tool calls once they're all completed. All outputs must be submitted in a single + request. + + Args: + tool_outputs: A list of tools for which the outputs are being submitted. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", + body=maybe_transform( + {"tool_outputs": tool_outputs}, run_submit_tool_outputs_params.RunSubmitToolOutputsParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + +class AsyncRuns(AsyncAPIResource): + steps: AsyncSteps + with_raw_response: AsyncRunsWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.steps = AsyncSteps(client) + self.with_raw_response = AsyncRunsWithRawResponse(self) + + async def create( + self, + thread_id: str, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: Optional[str] | NotGiven = NOT_GIVEN, + tools: Optional[List[run_create_params.Tool]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Create a run. + + Args: + assistant_id: The ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + execute this run. + + instructions: Override the default system message of the assistant. This is useful for + modifying the behavior on a per-run basis. + + metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. + + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/runs", + body=maybe_transform( + { + "assistant_id": assistant_id, + "instructions": instructions, + "metadata": metadata, + "model": model, + "tools": tools, + }, + run_create_params.RunCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + async def retrieve( + self, + run_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Retrieves a run. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._get( + f"/threads/{thread_id}/runs/{run_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + async def update( + self, + run_id: str, + *, + thread_id: str, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Modifies a run. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/runs/{run_id}", + body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[Run, AsyncCursorPage[Run]]: + """ + Returns a list of runs belonging to a thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/runs", + page=AsyncCursorPage[Run], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + run_list_params.RunListParams, + ), + ), + model=Run, + ) + + async def cancel( + self, + run_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Cancels a run that is `in_progress`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/runs/{run_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + async def submit_tool_outputs( + self, + run_id: str, + *, + thread_id: str, + tool_outputs: List[run_submit_tool_outputs_params.ToolOutput], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + tool calls once they're all completed. All outputs must be submitted in a single + request. + + Args: + tool_outputs: A list of tools for which the outputs are being submitted. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", + body=maybe_transform( + {"tool_outputs": tool_outputs}, run_submit_tool_outputs_params.RunSubmitToolOutputsParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + +class RunsWithRawResponse: + def __init__(self, runs: Runs) -> None: + self.steps = StepsWithRawResponse(runs.steps) + + self.create = to_raw_response_wrapper( + runs.create, + ) + self.retrieve = to_raw_response_wrapper( + runs.retrieve, + ) + self.update = to_raw_response_wrapper( + runs.update, + ) + self.list = to_raw_response_wrapper( + runs.list, + ) + self.cancel = to_raw_response_wrapper( + runs.cancel, + ) + self.submit_tool_outputs = to_raw_response_wrapper( + runs.submit_tool_outputs, + ) + + +class AsyncRunsWithRawResponse: + def __init__(self, runs: AsyncRuns) -> None: + self.steps = AsyncStepsWithRawResponse(runs.steps) + + self.create = async_to_raw_response_wrapper( + runs.create, + ) + self.retrieve = async_to_raw_response_wrapper( + runs.retrieve, + ) + self.update = async_to_raw_response_wrapper( + runs.update, + ) + self.list = async_to_raw_response_wrapper( + runs.list, + ) + self.cancel = async_to_raw_response_wrapper( + runs.cancel, + ) + self.submit_tool_outputs = async_to_raw_response_wrapper( + runs.submit_tool_outputs, + ) diff --git a/site-packages/openai/resources/beta/threads/runs/steps.py b/site-packages/openai/resources/beta/threads/runs/steps.py new file mode 100644 index 0000000..4fcc87a --- /dev/null +++ b/site-packages/openai/resources/beta/threads/runs/steps.py @@ -0,0 +1,257 @@ +# File generated from our OpenAPI spec by Stainless. 
+ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing_extensions import Literal + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from .....pagination import SyncCursorPage, AsyncCursorPage +from ....._base_client import AsyncPaginator, make_request_options +from .....types.beta.threads.runs import RunStep, step_list_params + +if TYPE_CHECKING: + from ....._client import OpenAI, AsyncOpenAI + +__all__ = ["Steps", "AsyncSteps"] + + +class Steps(SyncAPIResource): + with_raw_response: StepsWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.with_raw_response = StepsWithRawResponse(self) + + def retrieve( + self, + step_id: str, + *, + thread_id: str, + run_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunStep: + """ + Retrieves a run step. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get( + f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunStep, + ) + + def list( + self, + run_id: str, + *, + thread_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[RunStep]: + """ + Returns a list of run steps belonging to a run. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
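Editorial aside (not part of the patch): the cursor parameters documented above (`after`, `before`, `limit`, `order`) behave the same way on every list endpoint added in this patch. As a rough usage sketch only, manual cursor pagination over run steps could look like the following; it assumes a client configured from the OPENAI_API_KEY environment variable, that these resources are reachable as client.beta.threads.runs.steps, and that all IDs are placeholders:

from openai import OpenAI

client = OpenAI()

# First page: the newest steps of a run, 20 at a time (the documented default limit).
page = client.beta.threads.runs.steps.list(
    run_id="run_abc123",        # placeholder ID
    thread_id="thread_abc123",  # placeholder ID
    limit=20,
    order="desc",
)
for step in page.data:
    print(step.id)

# Next page: pass the last object ID of the previous page as `after`,
# exactly as the parameter description above explains.
if page.data:
    next_page = client.beta.threads.runs.steps.list(
        run_id="run_abc123",
        thread_id="thread_abc123",
        after=page.data[-1].id,
    )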
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/runs/{run_id}/steps", + page=SyncCursorPage[RunStep], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + step_list_params.StepListParams, + ), + ), + model=RunStep, + ) + + +class AsyncSteps(AsyncAPIResource): + with_raw_response: AsyncStepsWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.with_raw_response = AsyncStepsWithRawResponse(self) + + async def retrieve( + self, + step_id: str, + *, + thread_id: str, + run_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunStep: + """ + Retrieves a run step. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._get( + f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunStep, + ) + + def list( + self, + run_id: str, + *, + thread_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[RunStep, AsyncCursorPage[RunStep]]: + """ + Returns a list of run steps belonging to a run. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include before=obj_foo in order to + fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. 
+ + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get_api_list( + f"/threads/{thread_id}/runs/{run_id}/steps", + page=AsyncCursorPage[RunStep], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + step_list_params.StepListParams, + ), + ), + model=RunStep, + ) + + +class StepsWithRawResponse: + def __init__(self, steps: Steps) -> None: + self.retrieve = to_raw_response_wrapper( + steps.retrieve, + ) + self.list = to_raw_response_wrapper( + steps.list, + ) + + +class AsyncStepsWithRawResponse: + def __init__(self, steps: AsyncSteps) -> None: + self.retrieve = async_to_raw_response_wrapper( + steps.retrieve, + ) + self.list = async_to_raw_response_wrapper( + steps.list, + ) diff --git a/site-packages/openai/resources/beta/threads/threads.py b/site-packages/openai/resources/beta/threads/threads.py new file mode 100644 index 0000000..9469fc0 --- /dev/null +++ b/site-packages/openai/resources/beta/threads/threads.py @@ -0,0 +1,543 @@ +# File generated from our OpenAPI spec by Stainless. + +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Optional + +import httpx + +from .runs import Runs, AsyncRuns, RunsWithRawResponse, AsyncRunsWithRawResponse +from .messages import ( + Messages, + AsyncMessages, + MessagesWithRawResponse, + AsyncMessagesWithRawResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import to_raw_response_wrapper, async_to_raw_response_wrapper +from ....types.beta import ( + Thread, + ThreadDeleted, + thread_create_params, + thread_update_params, + thread_create_and_run_params, +) +from ...._base_client import make_request_options +from ....types.beta.threads import Run + +if TYPE_CHECKING: + from ...._client import OpenAI, AsyncOpenAI + +__all__ = ["Threads", "AsyncThreads"] + + +class Threads(SyncAPIResource): + runs: Runs + messages: Messages + with_raw_response: ThreadsWithRawResponse + + def __init__(self, client: OpenAI) -> None: + super().__init__(client) + self.runs = Runs(client) + self.messages = Messages(client) + self.with_raw_response = ThreadsWithRawResponse(self) + + def create( + self, + *, + messages: List[thread_create_params.Message] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Thread: + """ + Create a thread. + + Args: + messages: A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + start the thread with. 
+ + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + "/threads", + body=maybe_transform( + { + "messages": messages, + "metadata": metadata, + }, + thread_create_params.ThreadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Thread, + ) + + def retrieve( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Thread: + """ + Retrieves a thread. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._get( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Thread, + ) + + def update( + self, + thread_id: str, + *, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Thread: + """ + Modifies a thread. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + f"/threads/{thread_id}", + body=maybe_transform({"metadata": metadata}, thread_update_params.ThreadUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Thread, + ) + + def delete( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadDeleted: + """ + Delete a thread. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._delete( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadDeleted, + ) + + def create_and_run( + self, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: Optional[str] | NotGiven = NOT_GIVEN, + thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, + tools: Optional[List[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Create a thread and run it in one request. + + Args: + assistant_id: The ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + execute this run. + + instructions: Override the default system message of the assistant. This is useful for + modifying the behavior on a per-run basis. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. + + thread: If no thread is provided, an empty thread will be created. + + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. 
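Editorial aside (not part of the patch): for orientation, a hedged sketch of the create_and_run call documented above. The assistant ID, message content, and metadata key are placeholders, and it assumes the resource is exposed as client.beta.threads on a client configured from the environment:

from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",                     # placeholder assistant ID
    instructions="Answer in one short paragraph.",  # per-run override of the assistant's instructions
    thread={
        "messages": [
            {"role": "user", "content": "Summarize the latest release notes."},
        ],
    },
    metadata={"ticket": "DEV-42"},                  # one of up to 16 key-value pairs
)
print(run.id, run.status)

Omitting `thread` starts the run on a new, empty thread, as noted in the parameter description.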
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return self._post( + "/threads/runs", + body=maybe_transform( + { + "assistant_id": assistant_id, + "instructions": instructions, + "metadata": metadata, + "model": model, + "thread": thread, + "tools": tools, + }, + thread_create_and_run_params.ThreadCreateAndRunParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + +class AsyncThreads(AsyncAPIResource): + runs: AsyncRuns + messages: AsyncMessages + with_raw_response: AsyncThreadsWithRawResponse + + def __init__(self, client: AsyncOpenAI) -> None: + super().__init__(client) + self.runs = AsyncRuns(client) + self.messages = AsyncMessages(client) + self.with_raw_response = AsyncThreadsWithRawResponse(self) + + async def create( + self, + *, + messages: List[thread_create_params.Message] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Thread: + """ + Create a thread. + + Args: + messages: A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + start the thread with. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + "/threads", + body=maybe_transform( + { + "messages": messages, + "metadata": metadata, + }, + thread_create_params.ThreadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Thread, + ) + + async def retrieve( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Thread: + """ + Retrieves a thread. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._get( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Thread, + ) + + async def update( + self, + thread_id: str, + *, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Thread: + """ + Modifies a thread. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + f"/threads/{thread_id}", + body=maybe_transform({"metadata": metadata}, thread_update_params.ThreadUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Thread, + ) + + async def delete( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadDeleted: + """ + Delete a thread. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._delete( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadDeleted, + ) + + async def create_and_run( + self, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: Optional[str] | NotGiven = NOT_GIVEN, + thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, + tools: Optional[List[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Run: + """ + Create a thread and run it in one request. + + Args: + assistant_id: The ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + execute this run. + + instructions: Override the default system message of the assistant. This is useful for + modifying the behavior on a per-run basis. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format. Keys + can be a maximum of 64 characters long and values can be a maxium of 512 + characters long. + + model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. + + thread: If no thread is provided, an empty thread will be created. + + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} + return await self._post( + "/threads/runs", + body=maybe_transform( + { + "assistant_id": assistant_id, + "instructions": instructions, + "metadata": metadata, + "model": model, + "thread": thread, + "tools": tools, + }, + thread_create_and_run_params.ThreadCreateAndRunParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Run, + ) + + +class ThreadsWithRawResponse: + def __init__(self, threads: Threads) -> None: + self.runs = RunsWithRawResponse(threads.runs) + self.messages = MessagesWithRawResponse(threads.messages) + + self.create = to_raw_response_wrapper( + threads.create, + ) + self.retrieve = to_raw_response_wrapper( + threads.retrieve, + ) + self.update = to_raw_response_wrapper( + threads.update, + ) + self.delete = to_raw_response_wrapper( + threads.delete, + ) + self.create_and_run = to_raw_response_wrapper( + threads.create_and_run, + ) + + +class AsyncThreadsWithRawResponse: + def __init__(self, threads: AsyncThreads) -> None: + self.runs = AsyncRunsWithRawResponse(threads.runs) + self.messages = AsyncMessagesWithRawResponse(threads.messages) + + self.create = async_to_raw_response_wrapper( + threads.create, + ) + self.retrieve = async_to_raw_response_wrapper( + threads.retrieve, + ) + self.update = async_to_raw_response_wrapper( + threads.update, + ) + self.delete = async_to_raw_response_wrapper( + threads.delete, + ) + self.create_and_run = async_to_raw_response_wrapper( + threads.create_and_run, + ) diff --git a/site-packages/openai/resources/chat/.DS_Store b/site-packages/openai/resources/chat/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 
zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 ChatCompletion: """ Creates a model response for the given chat conversation. @@ -88,18 +98,24 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) - function_call: Controls how the model calls functions. "none" means the model will not call a - function and instead generates a message. "auto" means the model can pick - between generating a message or calling a function. Specifying a particular - function via `{"name": "my_function"}` forces the model to call that function. - "none" is the default when no functions are present. "auto" is the default if + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via `{"name": "my_function"}` forces the model to call that + function. + + `none` is the default when no functions are present. `auto`` is the default if functions are present. - functions: A list of functions the model may generate JSON inputs for. + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or @@ -121,6 +137,25 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in increased latency and appearance of a "stuck" request. Also + note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` + or the conversation exceeded the max context length. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -136,6 +171,20 @@ class Completions(SyncAPIResource): We generally recommend altering this or `top_p` but not both. 
+ tool_choice: Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via + `{"type: "function", "function": {"name": "my_function"}}` forces the model to + call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. + top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. @@ -164,6 +213,8 @@ class Completions(SyncAPIResource): model: Union[ str, Literal[ + "gpt-4-1106-preview", + "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", @@ -185,8 +236,12 @@ class Completions(SyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -194,7 +249,7 @@ class Completions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -220,18 +275,24 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) - function_call: Controls how the model calls functions. "none" means the model will not call a - function and instead generates a message. "auto" means the model can pick - between generating a message or calling a function. Specifying a particular - function via `{"name": "my_function"}` forces the model to call that function. - "none" is the default when no functions are present. "auto" is the default if + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via `{"name": "my_function"}` forces the model to call that + function. + + `none` is the default when no functions are present. `auto`` is the default if functions are present. - functions: A list of functions the model may generate JSON inputs for. + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. 
logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or @@ -253,6 +314,25 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in increased latency and appearance of a "stuck" request. Also + note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` + or the conversation exceeded the max context length. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -261,6 +341,20 @@ class Completions(SyncAPIResource): We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via + `{"type: "function", "function": {"name": "my_function"}}` forces the model to + call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. + top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
@@ -289,6 +383,8 @@ class Completions(SyncAPIResource): model: Union[ str, Literal[ + "gpt-4-1106-preview", + "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", @@ -310,8 +406,12 @@ class Completions(SyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -319,7 +419,7 @@ class Completions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -345,18 +445,24 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) - function_call: Controls how the model calls functions. "none" means the model will not call a - function and instead generates a message. "auto" means the model can pick - between generating a message or calling a function. Specifying a particular - function via `{"name": "my_function"}` forces the model to call that function. - "none" is the default when no functions are present. "auto" is the default if + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via `{"name": "my_function"}` forces the model to call that + function. + + `none` is the default when no functions are present. `auto`` is the default if functions are present. - functions: A list of functions the model may generate JSON inputs for. + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or @@ -378,6 +484,25 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. 
+ + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in increased latency and appearance of a "stuck" request. Also + note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` + or the conversation exceeded the max context length. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -386,6 +511,20 @@ class Completions(SyncAPIResource): We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via + `{"type: "function", "function": {"name": "my_function"}}` forces the model to + call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. + top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. @@ -414,6 +553,8 @@ class Completions(SyncAPIResource): model: Union[ str, Literal[ + "gpt-4-1106-preview", + "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", @@ -434,9 +575,13 @@ class Completions(SyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -444,7 +589,7 @@ class Completions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: return self._post( "/chat/completions", @@ -459,9 +604,13 @@ class Completions(SyncAPIResource): "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, + "response_format": response_format, + "seed": seed, "stop": stop, "stream": stream, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_p": top_p, "user": user, }, @@ -491,6 +640,8 @@ class AsyncCompletions(AsyncAPIResource): model: Union[ str, Literal[ + "gpt-4-1106-preview", + "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", @@ -511,9 +662,13 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -521,7 +676,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: """ Creates a model response for the given chat conversation. @@ -540,18 +695,24 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) - function_call: Controls how the model calls functions. "none" means the model will not call a - function and instead generates a message. "auto" means the model can pick - between generating a message or calling a function. Specifying a particular - function via `{"name": "my_function"}` forces the model to call that function. - "none" is the default when no functions are present. "auto" is the default if + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via `{"name": "my_function"}` forces the model to call that + function. + + `none` is the default when no functions are present. `auto`` is the default if functions are present. - functions: A list of functions the model may generate JSON inputs for. + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. 
- Accepts a json object that maps tokens (specified by their token ID in the + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or @@ -573,6 +734,25 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in increased latency and appearance of a "stuck" request. Also + note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` + or the conversation exceeded the max context length. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -588,6 +768,20 @@ class AsyncCompletions(AsyncAPIResource): We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via + `{"type: "function", "function": {"name": "my_function"}}` forces the model to + call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. + top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
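`tools` and `tool_choice` supersede the deprecated `functions`/`function_call` pair documented earlier in this file. A hedged sketch of the new shape; the function schema, its name, and the forced `tool_choice` value are invented for illustration:

    # Sketch: declaring a tool and forcing the model to call it.
    from openai import OpenAI

    client = OpenAI()
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical function
                "description": "Look up the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ]
    completion = client.chat.completions.create(
        model="gpt-4-1106-preview",
        messages=[{"role": "user", "content": "What's the weather in Paris?"}],
        tools=tools,
        # Pass "auto" (or omit tool_choice) to let the model decide; the dict
        # form below, {"type": "function", ...}, forces a specific function.
        tool_choice={"type": "function", "function": {"name": "get_weather"}},
    )
    call = completion.choices[0].message.tool_calls[0]
    print(call.function.name, call.function.arguments)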
@@ -616,6 +810,8 @@ class AsyncCompletions(AsyncAPIResource): model: Union[ str, Literal[ + "gpt-4-1106-preview", + "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", @@ -637,8 +833,12 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -646,7 +846,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -672,18 +872,24 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) - function_call: Controls how the model calls functions. "none" means the model will not call a - function and instead generates a message. "auto" means the model can pick - between generating a message or calling a function. Specifying a particular - function via `{"name": "my_function"}` forces the model to call that function. - "none" is the default when no functions are present. "auto" is the default if + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via `{"name": "my_function"}` forces the model to call that + function. + + `none` is the default when no functions are present. `auto`` is the default if functions are present. - functions: A list of functions the model may generate JSON inputs for. + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or @@ -705,6 +911,25 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. 
+ + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in increased latency and appearance of a "stuck" request. Also + note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` + or the conversation exceeded the max context length. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -713,6 +938,20 @@ class AsyncCompletions(AsyncAPIResource): We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via + `{"type: "function", "function": {"name": "my_function"}}` forces the model to + call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. + top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. @@ -741,6 +980,8 @@ class AsyncCompletions(AsyncAPIResource): model: Union[ str, Literal[ + "gpt-4-1106-preview", + "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", @@ -762,8 +1003,12 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -771,7 +1016,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
@@ -797,18 +1042,24 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) - function_call: Controls how the model calls functions. "none" means the model will not call a - function and instead generates a message. "auto" means the model can pick - between generating a message or calling a function. Specifying a particular - function via `{"name": "my_function"}` forces the model to call that function. - "none" is the default when no functions are present. "auto" is the default if + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. `auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via `{"name": "my_function"}` forces the model to call that + function. + + `none` is the default when no functions are present. `auto`` is the default if functions are present. - functions: A list of functions the model may generate JSON inputs for. + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or @@ -830,6 +1081,25 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in increased latency and appearance of a "stuck" request. Also + note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` + or the conversation exceeded the max context length. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -838,6 +1108,20 @@ class AsyncCompletions(AsyncAPIResource): We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) function is called by the model. `none` means the model + will not call a function and instead generates a message. 
`auto` means the model + can pick between generating a message or calling a function. Specifying a + particular function via + `{"type: "function", "function": {"name": "my_function"}}` forces the model to + call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. + top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. @@ -866,6 +1150,8 @@ class AsyncCompletions(AsyncAPIResource): model: Union[ str, Literal[ + "gpt-4-1106-preview", + "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", @@ -886,9 +1172,13 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -896,7 +1186,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: return await self._post( "/chat/completions", @@ -911,9 +1201,13 @@ class AsyncCompletions(AsyncAPIResource): "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, + "response_format": response_format, + "seed": seed, "stop": stop, "stream": stream, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_p": top_p, "user": user, }, diff --git a/site-packages/openai/resources/completions.py b/site-packages/openai/resources/completions.py index 26a3452..baf6f04 100644 --- a/site-packages/openai/resources/completions.py +++ b/site-packages/openai/resources/completions.py @@ -5,6 +5,8 @@ from __future__ import annotations from typing import TYPE_CHECKING, Dict, List, Union, Optional, overload from typing_extensions import Literal +import httpx + from ..types import Completion, completion_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import required_args, maybe_transform @@ -54,6 +56,7 @@ class Completions(SyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -65,7 +68,7 @@ class Completions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion: """ Creates a completion for the provided prompt and parameters. @@ -104,7 +107,7 @@ class Completions(SyncAPIResource): logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the GPT + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits @@ -142,6 +145,13 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + seed: If specified, our system will make a best effort to sample deterministically, + such that repeated requests with the same `seed` and parameters should return + the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` + response parameter to monitor changes in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
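The same `seed` parameter is also threaded through the legacy `/completions` resource in this part of the patch. A small sketch, assuming the v1 client; the model name is a placeholder for any completions-capable model:

    # Sketch: best-effort deterministic sampling on the legacy completions API.
    from openai import OpenAI

    client = OpenAI()
    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",  # placeholder completions model
        prompt="Say hello in French.",
        seed=42,            # identical requests *should* return identical text
        temperature=0,
        max_tokens=16,
    )
    print(completion.choices[0].text)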
@@ -209,6 +219,7 @@ class Completions(SyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -219,7 +230,7 @@ class Completions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[Completion]: """ Creates a completion for the provided prompt and parameters. @@ -265,7 +276,7 @@ class Completions(SyncAPIResource): logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the GPT + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits @@ -303,6 +314,13 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + seed: If specified, our system will make a best effort to sample deterministically, + such that repeated requests with the same `seed` and parameters should return + the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` + response parameter to monitor changes in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @@ -363,6 +381,7 @@ class Completions(SyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -373,7 +392,7 @@ class Completions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | Stream[Completion]: """ Creates a completion for the provided prompt and parameters. @@ -419,7 +438,7 @@ class Completions(SyncAPIResource): logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the GPT + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. 
Mathematically, the bias is added to the logits @@ -457,6 +476,13 @@ class Completions(SyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + seed: If specified, our system will make a best effort to sample deterministically, + such that repeated requests with the same `seed` and parameters should return + the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` + response parameter to monitor changes in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @@ -516,6 +542,7 @@ class Completions(SyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -527,7 +554,7 @@ class Completions(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | Stream[Completion]: return self._post( "/completions", @@ -543,6 +570,7 @@ class Completions(SyncAPIResource): "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, + "seed": seed, "stop": stop, "stream": stream, "suffix": suffix, @@ -596,6 +624,7 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -607,7 +636,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion: """ Creates a completion for the provided prompt and parameters. @@ -646,7 +675,7 @@ class AsyncCompletions(AsyncAPIResource): logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the GPT + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits @@ -684,6 +713,13 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + seed: If specified, our system will make a best effort to sample deterministically, + such that repeated requests with the same `seed` and parameters should return + the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` + response parameter to monitor changes in the backend. 
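Because determinism is only best-effort, the docstrings point at `system_fingerprint` as the signal that the serving backend changed between two seeded requests. One way to watch it, assuming the response model exposes the `system_fingerprint` field the docstring refers to (that field is defined outside this hunk):

    # Sketch: detecting backend changes that can break seed reproducibility.
    from openai import OpenAI

    client = OpenAI()
    previous = None
    for _ in range(2):
        completion = client.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=[{"role": "user", "content": "Pick a random integer."}],
            seed=7,
        )
        fingerprint = completion.system_fingerprint  # may be None on older backends
        if previous is not None and fingerprint != previous:
            print("Backend changed; seeded outputs may differ from earlier runs.")
        previous = fingerprint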
+ stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @@ -751,6 +787,7 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -761,7 +798,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[Completion]: """ Creates a completion for the provided prompt and parameters. @@ -807,7 +844,7 @@ class AsyncCompletions(AsyncAPIResource): logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the GPT + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits @@ -845,6 +882,13 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + seed: If specified, our system will make a best effort to sample deterministically, + such that repeated requests with the same `seed` and parameters should return + the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` + response parameter to monitor changes in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @@ -905,6 +949,7 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -915,7 +960,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | AsyncStream[Completion]: """ Creates a completion for the provided prompt and parameters. @@ -961,7 +1006,7 @@ class AsyncCompletions(AsyncAPIResource): logit_bias: Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the GPT + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. 
Mathematically, the bias is added to the logits @@ -999,6 +1044,13 @@ class AsyncCompletions(AsyncAPIResource): [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + seed: If specified, our system will make a best effort to sample deterministically, + such that repeated requests with the same `seed` and parameters should return + the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` + response parameter to monitor changes in the backend. + stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @@ -1058,6 +1110,7 @@ class AsyncCompletions(AsyncAPIResource): max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -1069,7 +1122,7 @@ class AsyncCompletions(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | AsyncStream[Completion]: return await self._post( "/completions", @@ -1085,6 +1138,7 @@ class AsyncCompletions(AsyncAPIResource): "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, + "seed": seed, "stop": stop, "stream": stream, "suffix": suffix, diff --git a/site-packages/openai/resources/edits.py b/site-packages/openai/resources/edits.py index 5c114c9..eafaa82 100644 --- a/site-packages/openai/resources/edits.py +++ b/site-packages/openai/resources/edits.py @@ -6,6 +6,8 @@ import typing_extensions from typing import TYPE_CHECKING, Union, Optional from typing_extensions import Literal +import httpx + from ..types import Edit, edit_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import maybe_transform @@ -43,7 +45,7 @@ class Edits(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Edit: """ Creates a new edit for the provided input, instruction, and parameters. @@ -122,7 +124,7 @@ class AsyncEdits(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Edit: """ Creates a new edit for the provided input, instruction, and parameters. 
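A change repeated across every resource in this patch is that `timeout` now accepts `httpx.Timeout` in addition to a bare float, which allows per-phase limits. A sketch of the difference, using the embeddings endpoint only as a convenient example:

    # Sketch: per-request timeout overrides with the widened type.
    import httpx
    from openai import OpenAI

    client = OpenAI()

    # Previously: a single float covering the whole request.
    client.embeddings.create(model="text-embedding-ada-002", input="hi", timeout=10.0)

    # Now: separate connect/read budgets via httpx.Timeout.
    client.embeddings.create(
        model="text-embedding-ada-002",
        input="hi",
        timeout=httpx.Timeout(30.0, connect=5.0, read=25.0),
    )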
diff --git a/site-packages/openai/resources/embeddings.py b/site-packages/openai/resources/embeddings.py index dd540fc..c31ad9d 100644 --- a/site-packages/openai/resources/embeddings.py +++ b/site-packages/openai/resources/embeddings.py @@ -6,6 +6,8 @@ import base64 from typing import TYPE_CHECKING, List, Union, cast from typing_extensions import Literal +import httpx + from ..types import CreateEmbeddingResponse, embedding_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import is_given, maybe_transform @@ -40,7 +42,7 @@ class Embeddings(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CreateEmbeddingResponse: """ Creates an embedding vector representing the input text. @@ -133,7 +135,7 @@ class AsyncEmbeddings(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CreateEmbeddingResponse: """ Creates an embedding vector representing the input text. diff --git a/site-packages/openai/resources/files.py b/site-packages/openai/resources/files.py index d2e674c..a6f75e5 100644 --- a/site-packages/openai/resources/files.py +++ b/site-packages/openai/resources/files.py @@ -3,15 +3,23 @@ from __future__ import annotations import time +import typing_extensions from typing import TYPE_CHECKING, Mapping, cast +from typing_extensions import Literal -from ..types import FileObject, FileDeleted, file_create_params +import httpx + +from ..types import FileObject, FileDeleted, file_list_params, file_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from .._utils import extract_files, maybe_transform, deepcopy_minimal from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_raw_response_wrapper, async_to_raw_response_wrapper from ..pagination import SyncPage, AsyncPage -from .._base_client import AsyncPaginator, make_request_options +from .._base_client import ( + AsyncPaginator, + HttpxBinaryResponseContent, + make_request_options, +) if TYPE_CHECKING: from .._client import OpenAI, AsyncOpenAI @@ -30,32 +38,38 @@ class Files(SyncAPIResource): self, *, file: FileTypes, - purpose: str, + purpose: Literal["fine-tune", "assistants"], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """Upload a file that can be used across various endpoints/features. - Currently, the - size of all the files uploaded by one organization can be up to 1 GB. Please - [contact us](https://help.openai.com/) if you need to increase the storage - limit. + The size of + all the files uploaded by one organization can be up to 100 GB. + + The size of individual files for can be a maximum of 512MB. See the + [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to + learn more about the types of files supported. 
The Fine-tuning API only supports + `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these + storage limits. Args: - file: The file object (not file name) to be uploaded. - - If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + file: The File object (not file name) to be uploaded. purpose: The intended purpose of the uploaded file. Use "fine-tune" for - [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). This - allows us to validate the format of the uploaded file is correct for - fine-tuning. + [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) and + "assistants" for + [Assistants](https://platform.openai.com/docs/api-reference/assistants) and + [Messages](https://platform.openai.com/docs/api-reference/messages). This allows + us to validate the format of the uploaded file is correct for fine-tuning. extra_headers: Send extra headers @@ -97,7 +111,7 @@ class Files(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """ Returns information about a specific file. @@ -122,19 +136,37 @@ class Files(SyncAPIResource): def list( self, *, + purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncPage[FileObject]: - """Returns a list of files that belong to the user's organization.""" + """ + Returns a list of files that belong to the user's organization. + + Args: + purpose: Only return files with the given purpose. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ return self._get_api_list( "/files", page=SyncPage[FileObject], options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), ), model=FileObject, ) @@ -148,7 +180,7 @@ class Files(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileDeleted: """ Delete a file. @@ -170,6 +202,38 @@ class Files(SyncAPIResource): cast_to=FileDeleted, ) + def content( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> HttpxBinaryResponseContent: + """ + Returns the contents of the specified file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=HttpxBinaryResponseContent, + ) + + @typing_extensions.deprecated("The `.content()` method should be used instead") def retrieve_content( self, file_id: str, @@ -179,7 +243,7 @@ class Files(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> str: """ Returns the contents of the specified file. @@ -237,32 +301,38 @@ class AsyncFiles(AsyncAPIResource): self, *, file: FileTypes, - purpose: str, + purpose: Literal["fine-tune", "assistants"], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """Upload a file that can be used across various endpoints/features. - Currently, the - size of all the files uploaded by one organization can be up to 1 GB. Please - [contact us](https://help.openai.com/) if you need to increase the storage - limit. + The size of + all the files uploaded by one organization can be up to 100 GB. + + The size of individual files for can be a maximum of 512MB. See the + [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to + learn more about the types of files supported. The Fine-tuning API only supports + `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these + storage limits. Args: - file: The file object (not file name) to be uploaded. - - If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + file: The File object (not file name) to be uploaded. purpose: The intended purpose of the uploaded file. Use "fine-tune" for - [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). This - allows us to validate the format of the uploaded file is correct for - fine-tuning. + [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) and + "assistants" for + [Assistants](https://platform.openai.com/docs/api-reference/assistants) and + [Messages](https://platform.openai.com/docs/api-reference/messages). This allows + us to validate the format of the uploaded file is correct for fine-tuning. 
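`purpose` is narrowed to a literal and gains the "assistants" value alongside "fine-tune", with the larger storage limits described above. A sketch of uploads under the new signature; the local file names are made up:

    # Sketch: uploading files for the Assistants API and for fine-tuning.
    from openai import OpenAI

    client = OpenAI()

    # Assistants/Messages uploads (subject to the 512 MB per-file and
    # 100 GB per-organization limits quoted in the docstring).
    assistants_file = client.files.create(
        file=open("notes.pdf", "rb"),   # hypothetical local file
        purpose="assistants",
    )

    # Fine-tuning still expects .jsonl training data.
    training_file = client.files.create(
        file=open("train.jsonl", "rb"),
        purpose="fine-tune",
    )
    print(assistants_file.id, training_file.id)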
extra_headers: Send extra headers @@ -304,7 +374,7 @@ class AsyncFiles(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """ Returns information about a specific file. @@ -329,19 +399,37 @@ class AsyncFiles(AsyncAPIResource): def list( self, *, + purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[FileObject, AsyncPage[FileObject]]: - """Returns a list of files that belong to the user's organization.""" + """ + Returns a list of files that belong to the user's organization. + + Args: + purpose: Only return files with the given purpose. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ return self._get_api_list( "/files", page=AsyncPage[FileObject], options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), ), model=FileObject, ) @@ -355,7 +443,7 @@ class AsyncFiles(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileDeleted: """ Delete a file. @@ -377,6 +465,38 @@ class AsyncFiles(AsyncAPIResource): cast_to=FileDeleted, ) + async def content( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> HttpxBinaryResponseContent: + """ + Returns the contents of the specified file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=HttpxBinaryResponseContent, + ) + + @typing_extensions.deprecated("The `.content()` method should be used instead") async def retrieve_content( self, file_id: str, @@ -386,7 +506,7 @@ class AsyncFiles(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> str: """ Returns the contents of the specified file. @@ -447,8 +567,11 @@ class FilesWithRawResponse: self.delete = to_raw_response_wrapper( files.delete, ) - self.retrieve_content = to_raw_response_wrapper( - files.retrieve_content, + self.content = to_raw_response_wrapper( + files.content, + ) + self.retrieve_content = to_raw_response_wrapper( # pyright: ignore[reportDeprecated] + files.retrieve_content # pyright: ignore[reportDeprecated], ) @@ -466,6 +589,9 @@ class AsyncFilesWithRawResponse: self.delete = async_to_raw_response_wrapper( files.delete, ) - self.retrieve_content = async_to_raw_response_wrapper( - files.retrieve_content, + self.content = async_to_raw_response_wrapper( + files.content, + ) + self.retrieve_content = async_to_raw_response_wrapper( # pyright: ignore[reportDeprecated] + files.retrieve_content # pyright: ignore[reportDeprecated], ) diff --git a/site-packages/openai/resources/fine_tunes.py b/site-packages/openai/resources/fine_tunes.py index 28f4225..91c8201 100644 --- a/site-packages/openai/resources/fine_tunes.py +++ b/site-packages/openai/resources/fine_tunes.py @@ -5,6 +5,8 @@ from __future__ import annotations from typing import TYPE_CHECKING, List, Union, Optional, overload from typing_extensions import Literal +import httpx + from ..types import ( FineTune, FineTuneEvent, @@ -53,7 +55,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTune: """ Creates a job that fine-tunes a specified model from a given dataset. @@ -197,7 +199,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTune: """ Gets info about the fine-tune job. 
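Pulling the Files changes together: `list()` gains an optional `purpose` filter, and the new `content()` method returns a binary response wrapper while `retrieve_content()` is marked deprecated. A sketch, assuming `HttpxBinaryResponseContent` exposes a `write_to_file()` helper (its definition is not part of this hunk) and using a placeholder file id:

    # Sketch: filtering files by purpose and downloading one with content().
    from openai import OpenAI

    client = OpenAI()

    for f in client.files.list(purpose="fine-tune"):
        print(f.id, f.filename)

    blob = client.files.content("file-abc123")  # placeholder id
    blob.write_to_file("downloaded.jsonl")      # assumed helper on the binary wrapper

    # Deprecated equivalent: client.files.retrieve_content("file-abc123")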
@@ -229,7 +231,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncPage[FineTune]: """List your organization's fine-tuning jobs""" return self._get_api_list( @@ -250,7 +252,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTune: """ Immediately cancel a fine-tune job. @@ -283,7 +285,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> FineTuneEventsListResponse: """ Get fine-grained status updates for a fine-tune job. @@ -318,7 +320,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> Stream[FineTuneEvent]: """ Get fine-grained status updates for a fine-tune job. @@ -353,7 +355,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> FineTuneEventsListResponse | Stream[FineTuneEvent]: """ Get fine-grained status updates for a fine-tune job. @@ -387,7 +389,7 @@ class FineTunes(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> FineTuneEventsListResponse | Stream[FineTuneEvent]: return self._get( f"/fine-tunes/{fine_tune_id}/events", @@ -431,7 +433,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTune: """ Creates a job that fine-tunes a specified model from a given dataset. @@ -575,7 +577,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTune: """ Gets info about the fine-tune job. 
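Unlike the other methods, the legacy fine-tune event listing keeps a 24-hour (86400-second) default timeout because the call can be long-polled or streamed. A sketch with a placeholder job id; the event fields are assumed from the legacy API rather than shown in this hunk:

    # Sketch: streaming legacy fine-tune events (note the 86400 s default timeout).
    from openai import OpenAI

    client = OpenAI()
    for event in client.fine_tunes.list_events("ft-abc123", stream=True):
        print(event.level, event.message)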
@@ -607,7 +609,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[FineTune, AsyncPage[FineTune]]: """List your organization's fine-tuning jobs""" return self._get_api_list( @@ -628,7 +630,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTune: """ Immediately cancel a fine-tune job. @@ -661,7 +663,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> FineTuneEventsListResponse: """ Get fine-grained status updates for a fine-tune job. @@ -696,7 +698,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> AsyncStream[FineTuneEvent]: """ Get fine-grained status updates for a fine-tune job. @@ -731,7 +733,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> FineTuneEventsListResponse | AsyncStream[FineTuneEvent]: """ Get fine-grained status updates for a fine-tune job. @@ -765,7 +767,7 @@ class AsyncFineTunes(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = 86400, + timeout: float | httpx.Timeout | None | NotGiven = 86400, ) -> FineTuneEventsListResponse | AsyncStream[FineTuneEvent]: return await self._get( f"/fine-tunes/{fine_tune_id}/events", diff --git a/site-packages/openai/resources/fine_tuning/.DS_Store b/site-packages/openai/resources/fine_tuning/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 FineTuningJob: """ Creates a job that fine-tunes a specified model from a given dataset. @@ -126,7 +128,7 @@ class Jobs(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Get info about a fine-tuning job. 
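For the newer fine-tuning jobs resource the only change in these hunks is the widened `timeout` type, but it is a natural place to show the two methods together. Arguments other than `timeout` are assumed from the unchanged parts of jobs.py, and the file id is a placeholder:

    # Sketch: creating and polling a fine-tuning job with an httpx.Timeout override.
    import httpx
    from openai import OpenAI

    client = OpenAI()
    job = client.fine_tuning.jobs.create(
        model="gpt-3.5-turbo",
        training_file="file-abc123",             # id returned by files.create()
        timeout=httpx.Timeout(60.0, connect=5.0),
    )
    job = client.fine_tuning.jobs.retrieve(job.id)
    print(job.status)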
@@ -160,7 +162,7 @@ class Jobs(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[FineTuningJob]: """ List your organization's fine-tuning jobs @@ -206,7 +208,7 @@ class Jobs(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Immediately cancel a fine-tune job. @@ -239,7 +241,7 @@ class Jobs(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[FineTuningJobEvent]: """ Get status updates for a fine-tuning job. @@ -297,7 +299,7 @@ class AsyncJobs(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Creates a job that fine-tunes a specified model from a given dataset. @@ -378,7 +380,7 @@ class AsyncJobs(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Get info about a fine-tuning job. @@ -412,7 +414,7 @@ class AsyncJobs(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[FineTuningJob, AsyncCursorPage[FineTuningJob]]: """ List your organization's fine-tuning jobs @@ -458,7 +460,7 @@ class AsyncJobs(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Immediately cancel a fine-tune job. @@ -491,7 +493,7 @@ class AsyncJobs(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[FineTuningJobEvent, AsyncCursorPage[FineTuningJobEvent]]: """ Get status updates for a fine-tuning job. 
diff --git a/site-packages/openai/resources/images.py b/site-packages/openai/resources/images.py index 1fd39b4..94b1bc1 100644 --- a/site-packages/openai/resources/images.py +++ b/site-packages/openai/resources/images.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Mapping, Optional, cast +from typing import TYPE_CHECKING, Union, Mapping, Optional, cast from typing_extensions import Literal +import httpx + from ..types import ( ImagesResponse, image_edit_params, @@ -34,6 +36,7 @@ class Images(SyncAPIResource): self, *, image: FileTypes, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -43,7 +46,7 @@ class Images(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates a variation of a given image. @@ -52,7 +55,11 @@ class Images(SyncAPIResource): image: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. - n: The number of images to generate. Must be between 1 and 10. + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. @@ -75,6 +82,7 @@ class Images(SyncAPIResource): body = deepcopy_minimal( { "image": image, + "model": model, "n": n, "response_format": response_format, "size": size, @@ -104,6 +112,7 @@ class Images(SyncAPIResource): image: FileTypes, prompt: str, mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -113,7 +122,7 @@ class Images(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an edited or extended image given an original image and a prompt. @@ -129,6 +138,9 @@ class Images(SyncAPIResource): indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + n: The number of images to generate. Must be between 1 and 10. response_format: The format in which the generated images are returned. 
Must be one of `url` or @@ -154,6 +166,7 @@ class Images(SyncAPIResource): "image": image, "prompt": prompt, "mask": mask, + "model": model, "n": n, "response_format": response_format, "size": size, @@ -181,31 +194,47 @@ class Images(SyncAPIResource): self, *, prompt: str, + model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an image given a prompt. Args: prompt: A text description of the desired image(s). The maximum length is 1000 - characters. + characters for `dall-e-2` and 4000 characters for `dall-e-3`. - n: The number of images to generate. Must be between 1 and 10. + model: The model to use for image generation. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + quality: The quality of the image that will be generated. `hd` creates images with finer + details and greater consistency across the image. This param is only supported + for `dall-e-3`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. + `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + `1024x1792` for `dall-e-3` models. + + style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid + causes the model to lean towards generating hyper-real and dramatic images. + Natural causes the model to produce more natural, less hyper-real looking + images. This param is only supported for `dall-e-3`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
@@ -224,9 +253,12 @@ class Images(SyncAPIResource): body=maybe_transform( { "prompt": prompt, + "model": model, "n": n, + "quality": quality, "response_format": response_format, "size": size, + "style": style, "user": user, }, image_generate_params.ImageGenerateParams, @@ -249,6 +281,7 @@ class AsyncImages(AsyncAPIResource): self, *, image: FileTypes, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -258,7 +291,7 @@ class AsyncImages(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates a variation of a given image. @@ -267,7 +300,11 @@ class AsyncImages(AsyncAPIResource): image: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. - n: The number of images to generate. Must be between 1 and 10. + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. @@ -290,6 +327,7 @@ class AsyncImages(AsyncAPIResource): body = deepcopy_minimal( { "image": image, + "model": model, "n": n, "response_format": response_format, "size": size, @@ -319,6 +357,7 @@ class AsyncImages(AsyncAPIResource): image: FileTypes, prompt: str, mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, @@ -328,7 +367,7 @@ class AsyncImages(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an edited or extended image given an original image and a prompt. @@ -344,6 +383,9 @@ class AsyncImages(AsyncAPIResource): indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + n: The number of images to generate. Must be between 1 and 10. response_format: The format in which the generated images are returned. 
Must be one of `url` or @@ -369,6 +411,7 @@ class AsyncImages(AsyncAPIResource): "image": image, "prompt": prompt, "mask": mask, + "model": model, "n": n, "response_format": response_format, "size": size, @@ -396,31 +439,47 @@ class AsyncImages(AsyncAPIResource): self, *, prompt: str, + model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an image given a prompt. Args: prompt: A text description of the desired image(s). The maximum length is 1000 - characters. + characters for `dall-e-2` and 4000 characters for `dall-e-3`. - n: The number of images to generate. Must be between 1 and 10. + model: The model to use for image generation. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + quality: The quality of the image that will be generated. `hd` creates images with finer + details and greater consistency across the image. This param is only supported + for `dall-e-3`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. + `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + `1024x1792` for `dall-e-3` models. + + style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid + causes the model to lean towards generating hyper-real and dramatic images. + Natural causes the model to produce more natural, less hyper-real looking + images. This param is only supported for `dall-e-3`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
@@ -439,9 +498,12 @@ class AsyncImages(AsyncAPIResource): body=maybe_transform( { "prompt": prompt, + "model": model, "n": n, + "quality": quality, "response_format": response_format, "size": size, + "style": style, "user": user, }, image_generate_params.ImageGenerateParams, diff --git a/site-packages/openai/resources/models.py b/site-packages/openai/resources/models.py index 689bbd6..2d04bdc 100644 --- a/site-packages/openai/resources/models.py +++ b/site-packages/openai/resources/models.py @@ -4,6 +4,8 @@ from __future__ import annotations from typing import TYPE_CHECKING +import httpx + from ..types import Model, ModelDeleted from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._resource import SyncAPIResource, AsyncAPIResource @@ -33,7 +35,7 @@ class Models(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Model: """ Retrieves a model instance, providing basic information about the model such as @@ -64,7 +66,7 @@ class Models(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncPage[Model]: """ Lists the currently available models, and provides basic information about each @@ -88,7 +90,7 @@ class Models(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelDeleted: """Delete a fine-tuned model. @@ -129,7 +131,7 @@ class AsyncModels(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Model: """ Retrieves a model instance, providing basic information about the model such as @@ -160,7 +162,7 @@ class AsyncModels(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[Model, AsyncPage[Model]]: """ Lists the currently available models, and provides basic information about each @@ -184,7 +186,7 @@ class AsyncModels(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelDeleted: """Delete a fine-tuned model. 
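Taken together, the images.py hunks add the `model`, `quality`, and `style` keywords, extend the accepted `size` literals for `dall-e-3`, and apply the same `httpx.Timeout` widening; models.py below changes only the timeout annotation. A hedged usage sketch against the updated `generate` signature, with illustrative values drawn from the docstrings above:

```python
import httpx
from openai import OpenAI

client = OpenAI()

# New keywords from this patch: model, quality, style; size gains the
# 1792x1024 / 1024x1792 options that are only valid for dall-e-3.
rsp = client.images.generate(
    prompt="A watercolor lighthouse at dawn",
    model="dall-e-3",
    n=1,                     # dall-e-3 only supports n=1
    quality="hd",
    style="natural",
    size="1792x1024",
    response_format="url",
    timeout=httpx.Timeout(120.0, connect=10.0),
)
print(rsp.data[0].url)
```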
diff --git a/site-packages/openai/resources/moderations.py b/site-packages/openai/resources/moderations.py index 1ee3e72..12a7c68 100644 --- a/site-packages/openai/resources/moderations.py +++ b/site-packages/openai/resources/moderations.py @@ -5,6 +5,8 @@ from __future__ import annotations from typing import TYPE_CHECKING, List, Union from typing_extensions import Literal +import httpx + from ..types import ModerationCreateResponse, moderation_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import maybe_transform @@ -35,7 +37,7 @@ class Moderations(SyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: """ Classifies if text violates OpenAI's Content Policy @@ -93,7 +95,7 @@ class AsyncModerations(AsyncAPIResource): extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: """ Classifies if text violates OpenAI's Content Policy diff --git a/site-packages/openai/types/.DS_Store b/site-packages/openai/types/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000
diff --git a/site-packages/sniffio/_tests/.DS_Store b/site-packages/sniffio/_tests/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000
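The moderations.py hunks above receive the same `timeout` widening as the other resources. For completeness, an equivalent sketch on the async side, again assuming the vendored v1-style client (the input text and timeout values are purely illustrative):

```python
import asyncio

import httpx
from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    result = await client.moderations.create(
        input="some user-provided text to screen",
        timeout=httpx.Timeout(10.0, connect=2.0),
    )
    print(result.results[0].flagged)


asyncio.run(main())
```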