Sync: devchat[main](6efb0ad6) Merge pull request #413 from devchat-ai/fix_openai_package_issue
This commit is contained in:
parent aac7c21166
commit d93d7789af
@@ -1,14 +0,0 @@
certifi-2024.7.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2024.7.4.dist-info/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
certifi-2024.7.4.dist-info/METADATA,sha256=L9_EuPoQQvHFzxu03_ctaEZxhEty7inz569jGWjlLGo,2221
certifi-2024.7.4.dist-info/RECORD,,
certifi-2024.7.4.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
certifi-2024.7.4.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=LHXz7E80YJYBzCBv6ZyidQ5-ciYSkSebpY2E5OM0l7o,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-38.pyc,,
certifi/__pycache__/__main__.cpython-38.pyc,,
certifi/__pycache__/core.cpython-38.pyc,,
certifi/cacert.pem,sha256=SIupYGAr8HzGP073rsEIaS_sQYIPwzKKjj894DgUmu4,291528
certifi/core.py,sha256=qRDDFyXVJwTB_EmoGppaXU_R9qCZvhl-EzxPMuV3nTA,4426
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: certifi
Version: 2024.7.4
Version: 2024.8.30
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
site-packages/certifi-2024.8.30.dist-info/RECORD (Normal file, 14 lines)
@@ -0,0 +1,14 @@
certifi-2024.8.30.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2024.8.30.dist-info/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
certifi-2024.8.30.dist-info/METADATA,sha256=GhBHRVUN6a4ZdUgE_N5wmukJfyuoE-QyIl8Y3ifNQBM,2222
certifi-2024.8.30.dist-info/RECORD,,
certifi-2024.8.30.dist-info/WHEEL,sha256=UvcQYKBHoFqaQd6LKyqHw9fxEolWLQnlzP0h_LgJAfI,91
certifi-2024.8.30.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=p_GYZrjUwPBUhpLlCZoGb0miKBKSqDAyZC5DvIuqbHQ,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-38.pyc,,
certifi/__pycache__/__main__.cpython-38.pyc,,
certifi/__pycache__/core.cpython-38.pyc,,
certifi/cacert.pem,sha256=lO3rZukXdPyuk6BWUJFOKQliWaXH6HGh9l1GGrUgG0c,299427
certifi/core.py,sha256=qRDDFyXVJwTB_EmoGppaXU_R9qCZvhl-EzxPMuV3nTA,4426
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1,5 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (73.0.1)
Generator: setuptools (74.0.0)
Root-Is-Purelib: true
Tag: py3-none-any
@@ -1,4 +1,4 @@
from .core import contents, where

__all__ = ["contents", "where"]
__version__ = "2024.07.04"
__version__ = "2024.08.30"
@@ -4796,3 +4796,134 @@ PQQDAwNoADBlAjAdfKR7w4l1M+E7qUW/Runpod3JIha3RxEL2Jq68cgLcFBTApFw
hVmpHqTm6iMxoAACMQD94vizrxa5HnPEluPBMBnYfubDl94cT7iJLzPrSA8Z94dG
XSaQpYXFuXqUPoeovQA=
-----END CERTIFICATE-----

# Issuer: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA
# Subject: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA
# Label: "TWCA CYBER Root CA"
# Serial: 85076849864375384482682434040119489222
# MD5 Fingerprint: 0b:33:a0:97:52:95:d4:a9:fd:bb:db:6e:a3:55:5b:51
# SHA1 Fingerprint: f6:b1:1c:1a:83:38:e9:7b:db:b3:a8:c8:33:24:e0:2d:9c:7f:26:66
# SHA256 Fingerprint: 3f:63:bb:28:14:be:17:4e:c8:b6:43:9c:f0:8d:6d:56:f0:b7:c4:05:88:3a:56:48:a3:34:42:4d:6b:3e:c5:58
-----BEGIN CERTIFICATE-----
MIIFjTCCA3WgAwIBAgIQQAE0jMIAAAAAAAAAATzyxjANBgkqhkiG9w0BAQwFADBQ
MQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FOLUNBMRAwDgYDVQQLEwdSb290
IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3QgQ0EwHhcNMjIxMTIyMDY1NDI5
WhcNNDcxMTIyMTU1OTU5WjBQMQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FO
LUNBMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3Qg
Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDG+Moe2Qkgfh1sTs6P
40czRJzHyWmqOlt47nDSkvgEs1JSHWdyKKHfi12VCv7qze33Kc7wb3+szT3vsxxF
avcokPFhV8UMxKNQXd7UtcsZyoC5dc4pztKFIuwCY8xEMCDa6pFbVuYdHNWdZsc/
34bKS1PE2Y2yHer43CdTo0fhYcx9tbD47nORxc5zb87uEB8aBs/pJ2DFTxnk684i
JkXXYJndzk834H/nY62wuFm40AZoNWDTNq5xQwTxaWV4fPMf88oon1oglWa0zbfu
j3ikRRjpJi+NmykosaS3Om251Bw4ckVYsV7r8Cibt4LK/c/WMw+f+5eesRycnupf
Xtuq3VTpMCEobY5583WSjCb+3MX2w7DfRFlDo7YDKPYIMKoNM+HvnKkHIuNZW0CP
2oi3aQiotyMuRAlZN1vH4xfyIutuOVLF3lSnmMlLIJXcRolftBL5hSmO68gnFSDA
S9TMfAxsNAwmmyYxpjyn9tnQS6Jk/zuZQXLB4HCX8SS7K8R0IrGsayIyJNN4KsDA
oS/xUgXJP+92ZuJF2A09rZXIx4kmyA+upwMu+8Ff+iDhcK2wZSA3M2Cw1a/XDBzC
kHDXShi8fgGwsOsVHkQGzaRP6AzRwyAQ4VRlnrZR0Bp2a0JaWHY06rc3Ga4udfmW
5cFZ95RXKSWNOkyrTZpB0F8mAwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBSdhWEUfMFib5do5E83QOGt4A1WNzAd
BgNVHQ4EFgQUnYVhFHzBYm+XaORPN0DhreANVjcwDQYJKoZIhvcNAQEMBQADggIB
AGSPesRiDrWIzLjHhg6hShbNcAu3p4ULs3a2D6f/CIsLJc+o1IN1KriWiLb73y0t
tGlTITVX1olNc79pj3CjYcya2x6a4CD4bLubIp1dhDGaLIrdaqHXKGnK/nZVekZn
68xDiBaiA9a5F/gZbG0jAn/xX9AKKSM70aoK7akXJlQKTcKlTfjF/biBzysseKNn
TKkHmvPfXvt89YnNdJdhEGoHK4Fa0o635yDRIG4kqIQnoVesqlVYL9zZyvpoBJ7t
RCT5dEA7IzOrg1oYJkK2bVS1FmAwbLGg+LhBoF1JSdJlBTrq/p1hvIbZv97Tujqx
f36SNI7JAG7cmL3c7IAFrQI932XtCwP39xaEBDG6k5TY8hL4iuO/Qq+n1M0RFxbI
Qh0UqEL20kCGoE8jypZFVmAGzbdVAaYBlGX+bgUJurSkquLvWL69J1bY73NxW0Qz
8ppy6rBePm6pUlvscG21h483XjyMnM7k8M4MZ0HMzvaAq07MTFb1wWFZk7Q+ptq4
NxKfKjLji7gh7MMrZQzvIt6IKTtM1/r+t+FHvpw+PoP7UV31aPcuIYXcv/Fa4nzX
xeSDwWrruoBa3lwtcHb4yOWHh8qgnaHlIhInD0Q9HWzq1MKLL295q39QpsQZp6F6
t5b5wR9iWqJDB0BeJsas7a5wFsWqynKKTbDPAYsDP27X
-----END CERTIFICATE-----

# Issuer: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd.
# Subject: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd.
# Label: "SecureSign Root CA12"
# Serial: 587887345431707215246142177076162061960426065942
# MD5 Fingerprint: c6:89:ca:64:42:9b:62:08:49:0b:1e:7f:e9:07:3d:e8
# SHA1 Fingerprint: 7a:22:1e:3d:de:1b:06:ac:9e:c8:47:70:16:8e:3c:e5:f7:6b:06:f4
# SHA256 Fingerprint: 3f:03:4b:b5:70:4d:44:b2:d0:85:45:a0:20:57:de:93:eb:f3:90:5f:ce:72:1a:cb:c7:30:c0:6d:da:ee:90:4e
-----BEGIN CERTIFICATE-----
MIIDcjCCAlqgAwIBAgIUZvnHwa/swlG07VOX5uaCwysckBYwDQYJKoZIhvcNAQEL
BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u
LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExMjAeFw0yMDA0MDgw
NTM2NDZaFw00MDA0MDgwNTM2NDZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD
eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS
b290IENBMTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6OcE3emhF
KxS06+QT61d1I02PJC0W6K6OyX2kVzsqdiUzg2zqMoqUm048luT9Ub+ZyZN+v/mt
p7JIKwccJ/VMvHASd6SFVLX9kHrko+RRWAPNEHl57muTH2SOa2SroxPjcf59q5zd
J1M3s6oYwlkm7Fsf0uZlfO+TvdhYXAvA42VvPMfKWeP+bl+sg779XSVOKik71gur
FzJ4pOE+lEa+Ym6b3kaosRbnhW70CEBFEaCeVESE99g2zvVQR9wsMJvuwPWW0v4J
hscGWa5Pro4RmHvzC1KqYiaqId+OJTN5lxZJjfU+1UefNzFJM3IFTQy2VYzxV4+K
h9GtxRESOaCtAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
AgEGMB0GA1UdDgQWBBRXNPN0zwRL1SXm8UC2LEzZLemgrTANBgkqhkiG9w0BAQsF
AAOCAQEAPrvbFxbS8hQBICw4g0utvsqFepq2m2um4fylOqyttCg6r9cBg0krY6Ld
mmQOmFxv3Y67ilQiLUoT865AQ9tPkbeGGuwAtEGBpE/6aouIs3YIcipJQMPTw4WJ
mBClnW8Zt7vPemVV2zfrPIpyMpcemik+rY3moxtt9XUa5rBouVui7mlHJzWhhpmA
8zNL4WukJsPvdFlseqJkth5Ew1DgDzk9qTPxpfPSvWKErI4cqc1avTc7bgoitPQV
55FYxTpE05Uo2cBl6XLK0A+9H7MV2anjpEcJnuDLN/v9vZfVvhgaaaI5gdka9at/
yOPiZwud9AzqVN/Ssq+xIvEg37xEHA==
-----END CERTIFICATE-----

# Issuer: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd.
# Subject: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd.
# Label: "SecureSign Root CA14"
# Serial: 575790784512929437950770173562378038616896959179
# MD5 Fingerprint: 71:0d:72:fa:92:19:65:5e:89:04:ac:16:33:f0:bc:d5
# SHA1 Fingerprint: dd:50:c0:f7:79:b3:64:2e:74:a2:b8:9d:9f:d3:40:dd:bb:f0:f2:4f
# SHA256 Fingerprint: 4b:00:9c:10:34:49:4f:9a:b5:6b:ba:3b:a1:d6:27:31:fc:4d:20:d8:95:5a:dc:ec:10:a9:25:60:72:61:e3:38
-----BEGIN CERTIFICATE-----
MIIFcjCCA1qgAwIBAgIUZNtaDCBO6Ncpd8hQJ6JaJ90t8sswDQYJKoZIhvcNAQEM
BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u
LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNDAeFw0yMDA0MDgw
NzA2MTlaFw00NTA0MDgwNzA2MTlaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD
eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS
b290IENBMTQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDF0nqh1oq/
FjHQmNE6lPxauG4iwWL3pwon71D2LrGeaBLwbCRjOfHw3xDG3rdSINVSW0KZnvOg
vlIfX8xnbacuUKLBl422+JX1sLrcneC+y9/3OPJH9aaakpUqYllQC6KxNedlsmGy
6pJxaeQp8E+BgQQ8sqVb1MWoWWd7VRxJq3qdwudzTe/NCcLEVxLbAQ4jeQkHO6Lo
/IrPj8BGJJw4J+CDnRugv3gVEOuGTgpa/d/aLIJ+7sr2KeH6caH3iGicnPCNvg9J
kdjqOvn90Ghx2+m1K06Ckm9mH+Dw3EzsytHqunQG+bOEkJTRX45zGRBdAuVwpcAQ
0BB8b8VYSbSwbprafZX1zNoCr7gsfXmPvkPx+SgojQlD+Ajda8iLLCSxjVIHvXib
y8posqTdDEx5YMaZ0ZPxMBoH064iwurO8YQJzOAUbn8/ftKChazcqRZOhaBgy/ac
18izju3Gm5h1DVXoX+WViwKkrkMpKBGk5hIwAUt1ax5mnXkvpXYvHUC0bcl9eQjs
0Wq2XSqypWa9a4X0dFbD9ed1Uigspf9mR6XU/v6eVL9lfgHWMI+lNpyiUBzuOIAB
SMbHdPTGrMNASRZhdCyvjG817XsYAFs2PJxQDcqSMxDxJklt33UkN4Ii1+iW/RVL
ApY+B3KVfqs9TC7XyvDf4Fg/LS8EmjijAQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUBpOjCl4oaTeqYR3r6/wtbyPk
86AwDQYJKoZIhvcNAQEMBQADggIBAJaAcgkGfpzMkwQWu6A6jZJOtxEaCnFxEM0E
rX+lRVAQZk5KQaID2RFPeje5S+LGjzJmdSX7684/AykmjbgWHfYfM25I5uj4V7Ib
ed87hwriZLoAymzvftAj63iP/2SbNDefNWWipAA9EiOWWF3KY4fGoweITedpdopT
zfFP7ELyk+OZpDc8h7hi2/DsHzc/N19DzFGdtfCXwreFamgLRB7lUe6TzktuhsHS
DCRZNhqfLJGP4xjblJUK7ZGqDpncllPjYYPGFrojutzdfhrGe0K22VoF3Jpf1d+4
2kd92jjbrDnVHmtsKheMYc2xbXIBw8MgAGJoFjHVdqqGuw6qnsb58Nn4DSEC5MUo
FlkRudlpcyqSeLiSV5sI8jrlL5WwWLdrIBRtFO8KvH7YVdiI2i/6GaX7i+B/OfVy
K4XELKzvGUWSTLNhB9xNH27SgRNcmvMSZ4PPmz+Ln52kuaiWA3rF7iDeM9ovnhp6
dB7h7sxaOgTdsxoEqBRjrLdHEoOabPXm6RUVkRqEGQ6UROcSjiVbgGcZ3GOTEAtl
Lor6CZpO2oYofaphNdgOpygau1LgePhsumywbrmHXumZNTfxPWQrqaA0k89jL9WB
365jJ6UeTo3cKXhZ+PmhIIynJkBugnLNeLLIjzwec+fBH7/PzqUqm9tEZDKgu39c
JRNItX+S
-----END CERTIFICATE-----

# Issuer: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd.
# Subject: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd.
# Label: "SecureSign Root CA15"
# Serial: 126083514594751269499665114766174399806381178503
# MD5 Fingerprint: 13:30:fc:c4:62:a6:a9:de:b5:c1:68:af:b5:d2:31:47
# SHA1 Fingerprint: cb:ba:83:c8:c1:5a:5d:f1:f9:73:6f:ca:d7:ef:28:13:06:4a:07:7d
# SHA256 Fingerprint: e7:78:f0:f0:95:fe:84:37:29:cd:1a:00:82:17:9e:53:14:a9:c2:91:44:28:05:e1:fb:1d:8f:b6:b8:88:6c:3a
-----BEGIN CERTIFICATE-----
MIICIzCCAamgAwIBAgIUFhXHw9hJp75pDIqI7fBw+d23PocwCgYIKoZIzj0EAwMw
UTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28uLCBM
dGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNTAeFw0yMDA0MDgwODMy
NTZaFw00NTA0MDgwODMyNTZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpDeWJl
cnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBSb290
IENBMTUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQLUHSNZDKZmbPSYAi4Io5GdCx4
wCtELW1fHcmuS1Iggz24FG1Th2CeX2yF2wYUleDHKP+dX+Sq8bOLbe1PL0vJSpSR
ZHX+AezB2Ot6lHhWGENfa4HL9rzatAy2KZMIaY+jQjBAMA8GA1UdEwEB/wQFMAMB
Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTrQciu/NWeUUj1vYv0hyCTQSvT
9DAKBggqhkjOPQQDAwNoADBlAjEA2S6Jfl5OpBEHvVnCB96rMjhTKkZEBhd6zlHp
4P9mLQlO4E/0BdGF9jVg3PVys0Z9AjBEmEYagoUeYWmJSwdLZrWeqrqgHkHZAXQ6
bkU6iYAZezKYVWOr62Nuk22rGwlgMU4=
-----END CERTIFICATE-----
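The hunks above bump certifi from 2024.7.4 to 2024.8.30 and extend cacert.pem with the new TWCA CYBER and SecureSign roots. A minimal sanity check for the upgraded install (a hedged sketch, not part of this commit):

```python
# Confirm the refreshed certifi bundle is the one in use.
import certifi

print(certifi.__version__)  # expected: "2024.08.30"
with open(certifi.where(), encoding="utf-8") as f:  # certifi.where() -> path to cacert.pem
    bundle = f.read()
assert "TWCA CYBER Root CA" in bundle  # label comments ship inside the bundle
```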
@@ -4,7 +4,7 @@ charset_normalizer-3.3.2.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_v
charset_normalizer-3.3.2.dist-info/METADATA,sha256=cfLhl5A6SI-F0oclm8w8ux9wshL1nipdeCdVnYb4AaA,33550
charset_normalizer-3.3.2.dist-info/RECORD,,
charset_normalizer-3.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
charset_normalizer-3.3.2.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
charset_normalizer-3.3.2.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
charset_normalizer-3.3.2.dist-info/entry_points.txt,sha256=ADSTKrkXZ3hhdOVFi6DcUEHQRS0xfxDIE_pEz4wLIXA,65
charset_normalizer-3.3.2.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
charset_normalizer/__init__.py,sha256=UzI3xC8PhmcLRMzSgPb6minTmRq0kWznnCBJ8ZCc2XI,1577
@@ -1,5 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (72.2.0)
Generator: setuptools (73.0.1)
Root-Is-Purelib: true
Tag: py3-none-any
@@ -28,7 +28,7 @@ Requires-Dist: importlib-metadata (>=6.8.0,<7.0.0)
Requires-Dist: importlib-resources (>=6.1.1,<7.0.0)
Requires-Dist: loguru (>=0.7.2,<0.8.0)
Requires-Dist: networkx (>=3.1,<4.0)
Requires-Dist: openai (>=1.0rc,<2.0)
Requires-Dist: openai (==1.35.15)
Requires-Dist: oyaml (>=1.0,<2.0)
Requires-Dist: pathspec (>=0.12.1,<0.13.0)
Requires-Dist: pydantic (==1.10.14)
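This hunk is the heart of the PR: devchat's openai requirement narrows from the open range `>=1.0rc,<2.0` to the exact pin `==1.35.15`. A quick post-install check (a hedged sketch using only the standard library, not code from this repository):

```python
# Verify the environment resolved the pinned openai version (Python 3.8+).
import importlib.metadata

installed = importlib.metadata.version("openai")
assert installed == "1.35.15", f"unexpected openai version: {installed}"
```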
@@ -1,7 +1,7 @@
../../../bin/devchat,sha256=a8KMZYH-GZd6OA7nXki105OsOlnCcZkv9SCnT1Fa3UU,260
devchat-0.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
devchat-0.3.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
devchat-0.3.0.dist-info/METADATA,sha256=zrCPA6D8YFQ5XLEambaBb5ZBTQxt5vCr8PGlUvG-XYU,7530
devchat-0.3.0.dist-info/METADATA,sha256=BifXZt6yw-7ywQBDIdIPpQRTxHohane30hqSh0F-TBc,7527
devchat-0.3.0.dist-info/RECORD,,
devchat-0.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
devchat-0.3.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: importlib_resources
Version: 6.4.4
Version: 6.4.5
Summary: Read resources from Python packages
Author-email: Barry Warsaw <barry@python.org>
Maintainer-email: "Jason R. Coombs" <jaraco@jaraco.com>
@@ -1,10 +1,10 @@
importlib_resources-6.4.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
importlib_resources-6.4.4.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
importlib_resources-6.4.4.dist-info/METADATA,sha256=nIjjcUGZZ403ATWQY_lRxkAk-vsdmIPDJ55Cgwq_Oc8,3975
importlib_resources-6.4.4.dist-info/RECORD,,
importlib_resources-6.4.4.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
importlib_resources-6.4.4.dist-info/top_level.txt,sha256=fHIjHU1GZwAjvcydpmUnUrTnbvdiWjG4OEVZK8by0TQ,20
importlib_resources/__init__.py,sha256=l4_Lf8tsSwcMPZwGNzTx1cogL298BMyg9PZjQfHEfsk,506
importlib_resources-6.4.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
importlib_resources-6.4.5.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
importlib_resources-6.4.5.dist-info/METADATA,sha256=iwGfOzcc9jUBSuQa6zQeXla0el4YCZlAoj5uRIm7fyg,3975
importlib_resources-6.4.5.dist-info/RECORD,,
importlib_resources-6.4.5.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
importlib_resources-6.4.5.dist-info/top_level.txt,sha256=fHIjHU1GZwAjvcydpmUnUrTnbvdiWjG4OEVZK8by0TQ,20
importlib_resources/__init__.py,sha256=3J-261Qqzg-1kBenMVsOsvJo19EbUmYqjHkrZWiFHOM,703
importlib_resources/__pycache__/__init__.cpython-38.pyc,,
importlib_resources/__pycache__/_adapters.cpython-38.pyc,,
importlib_resources/__pycache__/_common.cpython-38.pyc,,
@@ -14,7 +14,7 @@ importlib_resources/__pycache__/abc.cpython-38.pyc,,
importlib_resources/__pycache__/readers.cpython-38.pyc,,
importlib_resources/__pycache__/simple.cpython-38.pyc,,
importlib_resources/_adapters.py,sha256=vprJGbUeHbajX6XCuMP6J3lMrqCi-P_MTlziJUR7jfk,4482
importlib_resources/_common.py,sha256=BK_ZixQYz25eUKsdIqcVWVPsySBUhk4GwFzkyI5WRgg,5622
importlib_resources/_common.py,sha256=5PVT4ezn_Ptj7LIAebtLYquK7A6X4EYoQJM37yTBdbQ,5624
importlib_resources/_functional.py,sha256=mLU4DwSlh8_2IXWqwKOfPVxyRqAEpB3B4XTfRxr3X3M,2651
importlib_resources/_itertools.py,sha256=eDisV6RqiNZOogLSXf6LOGHOYc79FGgPrKNLzFLmCrU,1277
importlib_resources/abc.py,sha256=UKNU9ncEDkZRB3txcGb3WLxsL2iju9JbaLTI-dfLE_4,5162
@@ -29,7 +29,7 @@ importlib_resources/future/__pycache__/__init__.cpython-38.pyc,,
importlib_resources/future/__pycache__/adapters.cpython-38.pyc,,
importlib_resources/future/adapters.py,sha256=1-MF2VRcCButhcC1OMfZILU9o3kwZ4nXB2lurXpaIAw,2940
importlib_resources/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
importlib_resources/readers.py,sha256=Kc6pqTGaqtuW0l3kIkCOlqsNLaQLRJxwDYdktwedtHM,5930
importlib_resources/readers.py,sha256=n9Rn8B5UHapkXGSfFhQNbdk_pfDCISPBLIXZnpoOKs8,6251
importlib_resources/simple.py,sha256=wJm2qGZ9EMPFhRLiJBa9Em5tVKbD7Q8ibWtt4ZNgWBU,2590
importlib_resources/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
importlib_resources/tests/__pycache__/__init__.cpython-38.pyc,,
@@ -56,7 +56,7 @@ importlib_resources/tests/compat/py39.py,sha256=nOjut4CZDtRgZEblX9hWhupo9vli_CA1
importlib_resources/tests/test_compatibilty_files.py,sha256=95N_R7aik8cvnE6sBJpsxmP0K5plOWRIJDgbalD-Hpw,3314
importlib_resources/tests/test_contents.py,sha256=EagRx9Mz7MOe1kRaOc9XNX_YlBYy90Qzhv2rqWMhMlw,837
importlib_resources/tests/test_custom.py,sha256=QrHZqIWl0e-fsQRfm0ych8stOlKJOsAIU3rK6QOcyN0,1221
importlib_resources/tests/test_files.py,sha256=rd0uy7oXblm4GTblkMS0os4MM2QKiAD8wnhl4X65JJo,5030
importlib_resources/tests/test_files.py,sha256=4G4Wo4_2UJtpg9HrEKjRaXimM68lXKjkAmFY-9OLCyE,5796
importlib_resources/tests/test_functional.py,sha256=DV8sdnwtpacNjHQ3ExifIoeQTCr4C-M-NVbaWewOcAo,8863
importlib_resources/tests/test_open.py,sha256=eCDLP6SszzBK6vs5j6LDkuR0Y2rHXrTYNRVZsAFKnCQ,2681
importlib_resources/tests/test_path.py,sha256=rz_BOqNzEu5ZRyrhAHOi9tvx3K_0AqHiRNkYT4QFhQw,1985
@@ -1,5 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (70.2.0)
Generator: setuptools (74.1.2)
Root-Is-Purelib: true
Tag: py3-none-any
@@ -1,4 +1,11 @@
"""Read resources contained within a package."""
"""
Read resources contained within a package.

This codebase is shared between importlib.resources in the stdlib
and importlib_resources in PyPI. See
https://github.com/python/importlib_metadata/wiki/Development-Methodology
for more detail.
"""

from ._common import (
    as_file,
@@ -183,7 +183,7 @@ def _(path):
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
    """
    Wrap tempfile.TemporyDirectory to return a pathlib object.
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
    """
    with dir as result:
        yield pathlib.Path(result)
@@ -1,3 +1,5 @@
from __future__ import annotations

import collections
import contextlib
import itertools
@@ -5,6 +7,7 @@ import pathlib
import operator
import re
import warnings
from collections.abc import Iterator

from . import abc

@@ -135,27 +138,31 @@ class NamespaceReader(abc.TraversableResources):
    def __init__(self, namespace_path):
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        self.path = MultiplexedPath(*map(self._resolve, namespace_path))
        self.path = MultiplexedPath(*filter(bool, map(self._resolve, namespace_path)))

    @classmethod
    def _resolve(cls, path_str) -> abc.Traversable:
    def _resolve(cls, path_str) -> abc.Traversable | None:
        r"""
        Given an item from a namespace path, resolve it to a Traversable.

        path_str might be a directory on the filesystem or a path to a
        zipfile plus the path within the zipfile, e.g. ``/foo/bar`` or
        ``/foo/baz.zip/inner_dir`` or ``foo\baz.zip\inner_dir\sub``.

        path_str might also be a sentinel used by editable packages to
        trigger other behaviors (see python/importlib_resources#311).
        In that case, return None.
        """
        (dir,) = (cand for cand in cls._candidate_paths(path_str) if cand.is_dir())
        return dir
        dirs = (cand for cand in cls._candidate_paths(path_str) if cand.is_dir())
        return next(dirs, None)

    @classmethod
    def _candidate_paths(cls, path_str):
    def _candidate_paths(cls, path_str: str) -> Iterator[abc.Traversable]:
        yield pathlib.Path(path_str)
        yield from cls._resolve_zip_path(path_str)

    @staticmethod
    def _resolve_zip_path(path_str):
    def _resolve_zip_path(path_str: str):
        for match in reversed(list(re.finditer(r'[\\/]', path_str))):
            with contextlib.suppress(
                FileNotFoundError,
@@ -60,6 +60,26 @@ class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
class OpenNamespaceTests(FilesTests, util.DiskSetup, unittest.TestCase):
    MODULE = 'namespacedata01'

    def test_non_paths_in_dunder_path(self):
        """
        Non-path items in a namespace package's ``__path__`` are ignored.

        As reported in python/importlib_resources#311, some tools
        like Setuptools, when creating editable packages, will inject
        non-paths into a namespace package's ``__path__``, a
        sentinel like
        ``__editable__.sample_namespace-1.0.finder.__path_hook__``
        to cause the ``PathEntryFinder`` to be called when searching
        for packages. In that case, resources should still be loadable.
        """
        import namespacedata01

        namespacedata01.__path__.append(
            '__editable__.sample_namespace-1.0.finder.__path_hook__'
        )

        resources.files(namespacedata01)


class OpenNamespaceZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
    ZIP_MODULE = 'namespacedata01'
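The readers.py and test hunks above correspond to the importlib_resources 6.4.5 fix for python/importlib_resources#311: editable-install sentinels in a namespace package's `__path__` now resolve to None and are filtered out instead of raising. A self-contained sketch of the user-facing effect (the `demo_ns` package is fabricated here purely for illustration):

```python
import pathlib
import sys
import tempfile

import importlib_resources as resources

# Build a tiny namespace package (a directory with no __init__.py) on disk.
root = pathlib.Path(tempfile.mkdtemp())
(root / "demo_ns").mkdir()
(root / "demo_ns" / "data.txt").write_text("hello")
sys.path.insert(0, str(root))

import demo_ns  # imported as a namespace package

# Editable installs may append a non-path sentinel like this one:
demo_ns.__path__.append("__editable__.demo_ns-1.0.finder.__path_hook__")

# With 6.4.5 the sentinel is ignored and resources stay loadable.
print(resources.files(demo_ns).joinpath("data.txt").read_text())
```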
@@ -1,90 +0,0 @@
Metadata-Version: 2.3
Name: jiter
Version: 0.5.0
Classifier: Development Status :: 4 - Beta
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: Unix
Classifier: Operating System :: POSIX :: Linux
Classifier: Environment :: Console
Classifier: Environment :: MacOS X
Classifier: Topic :: File Formats :: JSON
Classifier: Framework :: Pydantic :: 2
Summary: Fast iterable JSON parser.
Keywords: JSON,parsing,deserialization,iter
Home-Page: https://github.com/pydantic/jiter/
Author: Samuel Colvin <samuel@pydantic.dev>
Author-email: Samuel Colvin <s@muelcolvin.com>
License: MIT
Requires-Python: >=3.8
Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
Project-URL: Source Code, https://github.com/pydantic/jiter/

# jiter

[](https://github.com/pydantic/jiter/actions?query=event%3Apush+branch%3Amain+workflow%3ACI)
[](https://pypi.python.org/pypi/jiter)
[](https://github.com/pydantic/jiter)
[](https://github.com/pydantic/jiter/blob/main/LICENSE)

This is a standalone version of the JSON parser used in `pydantic-core`. The recommendation is to only use this package directly if you do not use `pydantic`.

The API is extremely minimal:

```python
def from_json(
    json_data: bytes,
    /,
    *,
    allow_inf_nan: bool = True,
    cache_mode: Literal[True, False, "all", "keys", "none"] = "all",
    partial_mode: Literal[True, False, "off", "on", "trailing-strings"] = False,
    catch_duplicate_keys: bool = False,
    lossless_floats: bool = False,
) -> Any:
    """
    Parse input bytes into a JSON object.

    Arguments:
        json_data: The JSON data to parse
        allow_inf_nan: Whether to allow infinity (`Infinity` an `-Infinity`) and `NaN` values to float fields.
            Defaults to True.
        cache_mode: cache Python strings to improve performance at the cost of some memory usage
            - True / 'all' - cache all strings
            - 'keys' - cache only object keys
            - False / 'none' - cache nothing
        partial_mode: How to handle incomplete strings:
            - False / 'off' - raise an exception if the input is incomplete
            - True / 'on' - allow incomplete JSON but discard the last string if it is incomplete
            - 'trailing-strings' - allow incomplete JSON, and include the last incomplete string in the output
        catch_duplicate_keys: if True, raise an exception if objects contain the same key multiple times
        lossless_floats: if True, preserve full detail on floats using `LosslessFloat`

    Returns:
        Python object built from the JSON input.
    """

def cache_clear() -> None:
    """
    Reset the string cache.
    """

def cache_usage() -> int:
    """
    get the size of the string cache.

    Returns:
        Size of the string cache in bytes.
    """
```
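The jiter wheel is deleted because openai 1.35.15, unlike 1.42.0, does not depend on it (the `Requires-Dist: jiter<1,>=0.4.0` line disappears from openai's METADATA further down). For reference, the minimal API documented above is used like this (a hedged sketch against jiter 0.5.0):

```python
import jiter

# from_json takes bytes and returns plain Python objects.
doc = jiter.from_json(b'{"xs": [1, 2.5, null]}')
assert doc == {"xs": [1, 2.5, None]}
```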
@@ -1,9 +0,0 @@
jiter-0.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
jiter-0.5.0.dist-info/METADATA,sha256=INyvfU6eGMFpoEEddvNmFlPeL_ey3cBKh23wc1c8NkA,3625
jiter-0.5.0.dist-info/RECORD,,
jiter-0.5.0.dist-info/WHEEL,sha256=D5cP3kgsEJ0lgxRqyO9DXgrbimFqUu0b_jhJwYgzYlI,127
jiter/__init__.py,sha256=Fp9HkOixiYYDSiC_80vmiJ_sCoCGT8OAh48yltm0lP0,103
jiter/__init__.pyi,sha256=hla9Vbb00bLOJuCCnNP3Vmea4bxwdDXlxXUSmfqlTfY,2329
jiter/__pycache__/__init__.cpython-38.pyc,,
jiter/jiter.cpython-38-x86_64-linux-gnu.so,sha256=ypxOQeMHpekEfKDomRWyEadHTSx4_5kXATTglJpHKqI,756000
jiter/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1,4 +0,0 @@
Wheel-Version: 1.0
Generator: maturin (1.6.0)
Root-Is-Purelib: false
Tag: cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64
@@ -1,5 +0,0 @@
from .jiter import *

__doc__ = jiter.__doc__
if hasattr(jiter, "__all__"):
    __all__ = jiter.__all__
@@ -1,70 +0,0 @@
import decimal
from typing import Any, Literal

def from_json(
    json_data: bytes,
    /,
    *,
    allow_inf_nan: bool = True,
    cache_mode: Literal[True, False, "all", "keys", "none"] = "all",
    partial_mode: Literal[True, False, "off", "on", "trailing-strings"] = False,
    catch_duplicate_keys: bool = False,
    lossless_floats: bool = False,
) -> Any:
    """
    Parse input bytes into a JSON object.

    Arguments:
        json_data: The JSON data to parse
        allow_inf_nan: Whether to allow infinity (`Infinity` an `-Infinity`) and `NaN` values to float fields.
            Defaults to True.
        cache_mode: cache Python strings to improve performance at the cost of some memory usage
            - True / 'all' - cache all strings
            - 'keys' - cache only object keys
            - False / 'none' - cache nothing
        partial_mode: How to handle incomplete strings:
            - False / 'off' - raise an exception if the input is incomplete
            - True / 'on' - allow incomplete JSON but discard the last string if it is incomplete
            - 'trailing-strings' - allow incomplete JSON, and include the last incomplete string in the output
        catch_duplicate_keys: if True, raise an exception if objects contain the same key multiple times
        lossless_floats: if True, preserve full detail on floats using `LosslessFloat`

    Returns:
        Python object built from the JSON input.
    """

def cache_clear() -> None:
    """
    Reset the string cache.
    """

def cache_usage() -> int:
    """
    get the size of the string cache.

    Returns:
        Size of the string cache in bytes.
    """


class LosslessFloat:
    """
    Represents a float from JSON, by holding the underlying bytes representing a float from JSON.
    """
    def __init__(self, json_float: bytes):
        """Construct a LosslessFloat object from a JSON bytes slice"""

    def as_decimal(self) -> decimal.Decimal:
        """Construct a Python Decimal from the JSON bytes slice"""

    def __float__(self) -> float:
        """Construct a Python float from the JSON bytes slice"""

    def __bytes__(self) -> bytes:
        """Return the JSON bytes slice as bytes"""

    def __str__(self):
        """Return the JSON bytes slice as a string"""

    def __repr__(self):
        ...
Binary file not shown.
@@ -1,6 +1,6 @@
Metadata-Version: 2.3
Name: openai
Version: 1.42.0
Version: 1.35.15
Summary: The official Python library for the openai API
Project-URL: Homepage, https://github.com/openai/openai-python
Project-URL: Repository, https://github.com/openai/openai-python
@@ -27,11 +27,10 @@ Requires-Dist: anyio<5,>=3.5.0
Requires-Dist: cached-property; python_version < '3.8'
Requires-Dist: distro<2,>=1.7.0
Requires-Dist: httpx<1,>=0.23.0
Requires-Dist: jiter<1,>=0.4.0
Requires-Dist: pydantic<3,>=1.9.0
Requires-Dist: sniffio
Requires-Dist: tqdm>4
Requires-Dist: typing-extensions<5,>=4.11
Requires-Dist: typing-extensions<5,>=4.7
Provides-Extra: datalib
Requires-Dist: numpy>=1; extra == 'datalib'
Requires-Dist: pandas-stubs>=1.1.0.11; extra == 'datalib'
@@ -268,7 +267,7 @@ List methods in the OpenAI API are paginated.
This library provides auto-paginating iterators with each list response, so you do not have to request successive pages manually:

```python
from openai import OpenAI
import openai

client = OpenAI()

@@ -286,7 +285,7 @@ Or, asynchronously:

```python
import asyncio
from openai import AsyncOpenAI
import openai

client = AsyncOpenAI()

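The auto-pagination described in these README hunks behaves the same on both sides of the downgrade; a hedged sketch against the openai 1.x client (the endpoint choice is illustrative, and a valid OPENAI_API_KEY is assumed):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Iterating a list response fetches further pages on demand.
for job in client.fine_tuning.jobs.list(limit=20):
    print(job.id)
```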
@@ -1,11 +1,11 @@
../../../bin/openai,sha256=OM6FORuLrwfh02Zj_-DY6nOIKjU9ftrONpb5slPZlhM,253
openai-1.42.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
openai-1.42.0.dist-info/METADATA,sha256=wqpV0i8EEyBHUTds1sDW4Sz3dLosc--HIPmYYsxzvA8,22163
openai-1.42.0.dist-info/RECORD,,
openai-1.42.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
openai-1.42.0.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
openai-1.42.0.dist-info/licenses/LICENSE,sha256=d0M6HDjQ76tf255XPlAGkIoECMe688MXcGEYsOFySfI,11336
openai/__init__.py,sha256=YhCuMuxZHoRn6BnOxawEFt8fRZPnhBWGongW3CP-F3k,10191
openai-1.35.15.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
openai-1.35.15.dist-info/METADATA,sha256=jpHjd1X8VE9L4Lmtl0Lg2NokZkCGG1bOYqnBPkiIVfg,22103
openai-1.35.15.dist-info/RECORD,,
openai-1.35.15.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
openai-1.35.15.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
openai-1.35.15.dist-info/licenses/LICENSE,sha256=d0M6HDjQ76tf255XPlAGkIoECMe688MXcGEYsOFySfI,11336
openai/__init__.py,sha256=hTM-EsfeafKBLu-n5AVSQVDB2MMBGnZoLtATFeW-OL0,10007
openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
openai/__pycache__/__init__.cpython-38.pyc,,
openai/__pycache__/__main__.cpython-38.pyc,,
@@ -26,11 +26,11 @@ openai/__pycache__/_types.cpython-38.pyc,,
openai/__pycache__/_version.cpython-38.pyc,,
openai/__pycache__/pagination.cpython-38.pyc,,
openai/__pycache__/version.cpython-38.pyc,,
openai/_base_client.py,sha256=iiwtXGJA6mpeDhQg6pR22-WdDQ9NTszQevbmL6yOOfo,67460
openai/_client.py,sha256=PXHky30KYjUMIH8WV7PjKcOAULO9-36AbN8y1DCFu70,22233
openai/_compat.py,sha256=DvCJ4A6g0azYhKPpLKXoi1eYXpwhukMGUxJclk2DtUA,6832
openai/_base_client.py,sha256=YtSbhE5PT7vqFIosCyA9-e6zPt2GUhDZa9HeD1k0FxE,67174
openai/_client.py,sha256=BIWse5bWvbEIyGNdGqM5RjjP6zD0oFvFKFtqbIO-xf4,21751
openai/_compat.py,sha256=BPuFBQUQE-C6lIB2PTMco9-g08p94cqPxbU3x8VENso,6430
openai/_constants.py,sha256=L1pfEhuz_wM2w2_U9P_9JZzTbrN4pbLo207l96rtKcQ,469
openai/_exceptions.py,sha256=QoeAOAeOatAOxt80wJShYUQti-QQc2Pur45IxiWN-zk,4376
openai/_exceptions.py,sha256=IXzw429JsoOD9PbpqLWvncvuOuqU_GOZ1z9D494BZxU,3892
openai/_extras/__init__.py,sha256=LZbJLZ7aFHRcI7uiY4-wFQTdMp-BF6FER1QMhKVFkWk,107
openai/_extras/__pycache__/__init__.cpython-38.pyc,,
openai/_extras/__pycache__/_common.cpython-38.pyc,,
@@ -39,15 +39,15 @@ openai/_extras/__pycache__/pandas_proxy.cpython-38.pyc,,
openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364
openai/_extras/numpy_proxy.py,sha256=hwZXa_JBAPD5taRhor1tGxK26g5IaK52JclQDl-dky0,799
openai/_extras/pandas_proxy.py,sha256=NCEt1Dqwc_0H85YdsWPDE3lPDJtYnBT8G-gJE_BCeEc,637
openai/_files.py,sha256=WEf6hxJN1u3pVkdnPCpinhxCUnOV2olt4J6vLoJ_k48,3616
openai/_legacy_response.py,sha256=7NyYX04zAjuGGypOjOWwS7hljQq5OSyR-6B6_w3_q6g,15781
openai/_models.py,sha256=pXirq94yiihFXBbiR50vA-0NIlDurxbJ_rLXK062vWQ,28267
openai/_files.py,sha256=O4WNhHahzd5ZRe4F69WlBJegBpQM3O9YGeXWNkz972Y,3632
openai/_legacy_response.py,sha256=GLrqADb4ed3N5hffQJpU2nSZQ85OVLODB4JVcWFA4u4,15575
openai/_models.py,sha256=iIdzp18nPedv1mQxC6hoNDEsRIAiPkS88e4ZwA5mpGo,27892
openai/_module_client.py,sha256=gF_2bbdosIwUt29sQgrQRJOgNREvXF-IDxe4XKGhHjY,2523
openai/_qs.py,sha256=AOkSz4rHtK4YI3ZU_kzea-zpwBUgEY8WniGmTPyEimc,4846
openai/_resource.py,sha256=IQihFzFLhGOiGSlT2dO1ESWSTg2XypgbtAldtGdTOqU,1100
openai/_response.py,sha256=UW8TM-E4YE6UzhKcWOoGvBH3cVKh-aFe1yEL7wZaMIQ,29018
openai/_response.py,sha256=FhY-5uevGc0KRDmI0eH5n1g4ok-t4lcNq8aDnM-DWqE,28873
openai/_streaming.py,sha256=t1UZrg53fVJB5Rs6k2sT9PBbvjp-IGrQzUq_5nlxKG4,13102
openai/_types.py,sha256=77A36sAUMgrgTX3zNo2NKU_wbQZgoZWjGTwf3GTOGTc,6202
openai/_types.py,sha256=sZvy7fSCEWzjt1Fw9gqYHLJ78q9eces6pzMYAbPSyHQ,6226
openai/_utils/__init__.py,sha256=Uzq1-FIih_VUjzdNVWXks0sdC39KBKLMrZoz-_JOjJ4,1988
openai/_utils/__pycache__/__init__.cpython-38.pyc,,
openai/_utils/__pycache__/_logs.cpython-38.pyc,,
@@ -59,14 +59,14 @@ openai/_utils/__pycache__/_transform.cpython-38.pyc,,
openai/_utils/__pycache__/_typing.cpython-38.pyc,,
openai/_utils/__pycache__/_utils.cpython-38.pyc,,
openai/_utils/_logs.py,sha256=sFA_NejuNObTGGbfsXC03I38mrT9HjsgAJx4d3GP0ok,774
openai/_utils/_proxy.py,sha256=z3zsateHtb0EARTWKk8QZNHfPkqJbqwd1lM993LBwGE,1902
openai/_utils/_reflection.py,sha256=ZmGkIgT_PuwedyNBrrKGbxoWtkpytJNU1uU4QHnmEMU,1364
openai/_utils/_proxy.py,sha256=DjcB-BBIRagSbMut2pF_jZavjda9sPvmQCKtVXBhs0I,1910
openai/_utils/_reflection.py,sha256=k20KwLejVHcQCvu4mT2S61NDvbmuXF7KsMDapiocYS0,1364
openai/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289
openai/_utils/_sync.py,sha256=9ex9pfOyd8xAF1LxpFx4IkqL8k0vk8srE2Ee-OTMQ0A,2840
openai/_utils/_transform.py,sha256=NCz3q9_O-vuj60xVe-qzhEQ8uJWlZWJTsM-GwHDccf8,12958
openai/_utils/_typing.py,sha256=tFbktdpdHCQliwzGsWysgn0P5H0JRdagkZdb_LegGkY,3838
openai/_utils/_utils.py,sha256=LMVTMZG8pfu8AkJNSfmv_z3guQlOfm2UxDTjTTXggfg,11411
openai/_version.py,sha256=QnQeI7AaHUd-wvlAsHbOZ18hlZh1vO0ZLKyegXQlsqk,159
openai/_utils/_utils.py,sha256=FaZdW0tWil7IERdxUfKt7pVcyXL2aCnR3lo73q66qgI,11447
openai/_version.py,sha256=O3ygey2Uj1WzxixsY9mqqswKcNhg4-Z0Ivm-ZHJHmcw,160
openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31
openai/cli/__pycache__/__init__.cpython-38.pyc,,
openai/cli/__pycache__/_cli.cpython-38.pyc,,
@@ -83,7 +83,7 @@ openai/cli/_api/__pycache__/files.cpython-38.pyc,,
openai/cli/_api/__pycache__/image.cpython-38.pyc,,
openai/cli/_api/__pycache__/models.cpython-38.pyc,,
openai/cli/_api/_main.py,sha256=5yyfLURqCEaAN8B61gHaqVAaYgtyb9Xq0ncQ3P2BAh0,451
openai/cli/_api/audio.py,sha256=IPbABMwryQ0CQTF4gi6VS3hJi6qFjoyj6IDV2ZoPT6A,3787
openai/cli/_api/audio.py,sha256=HZDTRZT-qZTMsg7WOm-djCQlf874aSa3lxRvNG27wLM,3347
openai/cli/_api/chat/__init__.py,sha256=MhFUQH9F6QCtbPMlbsU_DWTd7wc5DSCZ7Wy3FBGVij0,300
openai/cli/_api/chat/__pycache__/__init__.cpython-38.pyc,,
openai/cli/_api/chat/__pycache__/completions.cpython-38.pyc,,
@@ -93,7 +93,7 @@ openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345
openai/cli/_api/image.py,sha256=ovBExdn8oUK9ImOpsPafesfAlmcftLP2p7d37hcUtKU,5062
openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295
openai/cli/_cli.py,sha256=WxqTnhVVtfzX0z7hV5fcvd3hkihaUgwOWpXOwyCS4Fc,6743
openai/cli/_errors.py,sha256=nejlu1HnOyAIr2n7uqpFtWn8XclWj_9N8FwgfT3BPK8,471
openai/cli/_errors.py,sha256=7BYF2Kp_L6yKsZDNdg-gK71FMVCNjhrunfVVgh4Zy0M,479
openai/cli/_models.py,sha256=tgsldjG216KpwgAZ5pS0sV02FQvONDJU2ElA4kCCiIU,491
openai/cli/_progress.py,sha256=aMLssU9jh-LoqRYH3608jNos7r6vZKnHTRlHxFznzv4,1406
openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58
@@ -103,42 +103,22 @@ openai/cli/_tools/__pycache__/fine_tunes.cpython-38.pyc,,
openai/cli/_tools/__pycache__/migrate.cpython-38.pyc,,
openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467
openai/cli/_tools/fine_tunes.py,sha256=RQgYMzifk6S7Y1I1K6huqco2QxmXa7gVUlHl6SrKTSU,1543
openai/cli/_tools/migrate.py,sha256=OM2VJiMzg5rglV56Y91kFe5L4UoZZmEhcPh6qSO9nsc,4506
openai/cli/_tools/migrate.py,sha256=GD3zHR700FRIhdx3gBqIrRLPrKjx4pDAKUgvnO0J2ug,5013
openai/cli/_utils.py,sha256=oiTc9MnxQh_zxAZ1OIHPkoDpCll0NF9ZgkdFHz4T-Bs,848
openai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
openai/lib/__init__.py,sha256=BMTfMnlbugMgDA1STDIAlx4bI4t4l_8bQmJxd0th0n8,126
openai/lib/__pycache__/__init__.cpython-38.pyc,,
openai/lib/__pycache__/_old_api.cpython-38.pyc,,
openai/lib/__pycache__/_pydantic.cpython-38.pyc,,
openai/lib/__pycache__/_tools.cpython-38.pyc,,
openai/lib/__pycache__/_validators.cpython-38.pyc,,
openai/lib/__pycache__/azure.cpython-38.pyc,,
openai/lib/_old_api.py,sha256=XZnXBrEKuTd70iJirj5mGW35fZoqruJobbBTq6bvg10,1947
openai/lib/_parsing/__init__.py,sha256=wS3BYvMGj9TqiPqOe3rO1sleaAJqHVuCaQuCE5rZIUw,539
openai/lib/_parsing/__pycache__/__init__.cpython-38.pyc,,
openai/lib/_parsing/__pycache__/_completions.cpython-38.pyc,,
openai/lib/_parsing/_completions.py,sha256=r20cZnzQMKWRyeo58dr8znXI1VX8cfZtmcFanjKvRZI,9112
openai/lib/_pydantic.py,sha256=ndHdDDSEGg8Jbhc7JvLQHiIrZwLR36bCcUAlzwLmOdk,5282
openai/lib/_tools.py,sha256=xrzM7jNgehZGsRQ9kSgn1q33z9cHrgf0b8UMo5wrTFw,1501
openai/lib/_validators.py,sha256=cXJXFuaAl7jeJcYHXXnFa4NHGtHs-_zt3Zs1VVCmQo4,35288
openai/lib/azure.py,sha256=iaiopzv8xI_JlYdtgEmnu32krLm0YiG44cDs_ictlgw,21536
openai/lib/azure.py,sha256=H5ddHmaRnDbwEYMUUzdv4macQuDELOj617jgV-fYN0g,21584
openai/lib/streaming/__init__.py,sha256=kD3LpjsqU7caDQDhB-YjTUl9qqbb5sPnGGSI2yQYC70,379
openai/lib/streaming/__pycache__/__init__.cpython-38.pyc,,
openai/lib/streaming/__pycache__/_assistants.cpython-38.pyc,,
openai/lib/streaming/__pycache__/_deltas.cpython-38.pyc,,
openai/lib/streaming/_assistants.py,sha256=_t1R-cTCXmKfQ3aLwSqGnYqjOxJIWhoWSvDHqIFCaPw,40575
openai/lib/streaming/_deltas.py,sha256=I7B_AznXZwlBmE8Puau7ayTQUx6hMIEVE8FYTQm2fjs,2502
openai/lib/streaming/chat/__init__.py,sha256=d243EsKxxHQ_MpUxecmYdLy4ZRVY6BKhL6QNSfLdtRY,1245
openai/lib/streaming/chat/__pycache__/__init__.cpython-38.pyc,,
openai/lib/streaming/chat/__pycache__/_completions.cpython-38.pyc,,
openai/lib/streaming/chat/__pycache__/_events.cpython-38.pyc,,
openai/lib/streaming/chat/__pycache__/_types.cpython-38.pyc,,
openai/lib/streaming/chat/_completions.py,sha256=Kje6_zfjFKxk0NIOSc9JZ_YxkhLKSlb8-weEtzt93Sc,28604
openai/lib/streaming/chat/_events.py,sha256=lstVmM6YR2Cs9drikzrY9JCZn9Nbfym0aKIPtNpxL6w,2618
openai/lib/streaming/chat/_types.py,sha256=-SYVBNhGkOUoJ-8dotxpCRqPJpfyOQ8hwR2_HrsQCRI,739
openai/pagination.py,sha256=B9ejXEAR_hYGLHfqb9xEEsE0u5dCUMjvplOce5dpY7M,2760
openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
openai/resources/__init__.py,sha256=eYonVyf6AAmk-b8JYSYmo5EEMv89ovxiAY5A83ti8J8,4533
openai/resources/__init__.py,sha256=odhKOSLopY06Kz2fJy9oafb2xViXgkLRJ2vn0Kc7qJA,4166
openai/resources/__pycache__/__init__.cpython-38.pyc,,
openai/resources/__pycache__/batches.cpython-38.pyc,,
openai/resources/__pycache__/completions.cpython-38.pyc,,
@@ -154,22 +134,16 @@ openai/resources/audio/__pycache__/speech.cpython-38.pyc,,
openai/resources/audio/__pycache__/transcriptions.cpython-38.pyc,,
openai/resources/audio/__pycache__/translations.cpython-38.pyc,,
openai/resources/audio/audio.py,sha256=1HHcDRWT58KshYelRdSnJs-0bvMBRS1vOhnU-h_oP5s,4481
openai/resources/audio/speech.py,sha256=s93RA77J_uS1Nu95dnrbbIahE-U-Csr9RozgqMFxB54,7885
openai/resources/audio/transcriptions.py,sha256=acLwtVFCuTuG4OYHdzJlRq70hAMLzWV8YJGGc64EBOU,11075
openai/resources/audio/translations.py,sha256=QFrFjB3JkruF7MPCJfsgGDRSlyKHAEdJLaF-OdhR3UY,8979
openai/resources/audio/speech.py,sha256=A4_SwpCesEfHg89cxazNdrHz8JxNvUp5LlLNoMqo-0w,7876
openai/resources/audio/transcriptions.py,sha256=Wqg-vcBBssCQ7OaSRK5r9m2MPhMgK3kU2TmgmzKF_7Q,11060
openai/resources/audio/translations.py,sha256=4HCXjxsw3h1498AVfMqL28ohZo00JcVrY-hCdgX2xAI,9002
openai/resources/batches.py,sha256=XuXe_9xb_x-Kq0myKTkcKHQqCf3OHhhNIXr4295mNHM,18560
openai/resources/beta/__init__.py,sha256=nXoV4P8WCrbEZuNMtptbIuy_LqlVafY9lJ2qfW35GFc,1636
openai/resources/beta/__pycache__/__init__.cpython-38.pyc,,
openai/resources/beta/__pycache__/assistants.cpython-38.pyc,,
openai/resources/beta/__pycache__/beta.cpython-38.pyc,,
openai/resources/beta/assistants.py,sha256=2zxpTMThsrleTO_jQiHyvJT0mu2Tth2pS5DKpO5B2UI,39567
openai/resources/beta/beta.py,sha256=Ys5tPrAmvr7_CmHJLKhnsQqWXGo5seX8-rP8HhXwk0w,4702
openai/resources/beta/chat/__init__.py,sha256=d_fpyFMAG3iRAPIXANPfRG4HtEm6U_uMUYep7Skj2uY,263
openai/resources/beta/chat/__pycache__/__init__.cpython-38.pyc,,
openai/resources/beta/chat/__pycache__/chat.cpython-38.pyc,,
openai/resources/beta/chat/__pycache__/completions.cpython-38.pyc,,
openai/resources/beta/chat/chat.py,sha256=sNvU8Fi_o3dWkD_X4Mobafv9XWBP6Y2dJxng-NdFXUs,597
openai/resources/beta/chat/completions.py,sha256=L0e0f-OwHaEFyc9rwmskIyuf_bq5sfa3vowWp83j3z8,20282
openai/resources/beta/assistants.py,sha256=jE9tf1oWbDEf28WRRD2_lgg_pkz52aHi0xM0-B7cuwI,39768
openai/resources/beta/beta.py,sha256=xw_dfi9ZpyRG4ChwweQtirWwsWxhAA4mXSV46D7pS5M,4485
openai/resources/beta/threads/__init__.py,sha256=fQ_qdUVSfouVS5h47DlTb5mamChT4K-v-siPuuAB6do,1177
openai/resources/beta/threads/__pycache__/__init__.cpython-38.pyc,,
openai/resources/beta/threads/__pycache__/messages.cpython-38.pyc,,
@@ -179,9 +153,9 @@ openai/resources/beta/threads/runs/__init__.py,sha256=2FfDaqwmJJCd-IVpY_CrzWcFvw
openai/resources/beta/threads/runs/__pycache__/__init__.cpython-38.pyc,,
openai/resources/beta/threads/runs/__pycache__/runs.cpython-38.pyc,,
openai/resources/beta/threads/runs/__pycache__/steps.cpython-38.pyc,,
openai/resources/beta/threads/runs/runs.py,sha256=q3LJ5EaOwTIeuu2Li4JtH3HXj3WOcevL_7Lozj_1MOY,137198
openai/resources/beta/threads/runs/runs.py,sha256=4i5Q5Z1WePpOt9h9vPUSKEz3raNGw2k4dKMEl6zIyW4,150971
openai/resources/beta/threads/runs/steps.py,sha256=uRykb4JapSNZCF8OD54f5qOWtrp2GoU1k5uAZgA4kAk,12223
openai/resources/beta/threads/threads.py,sha256=943MpBSJUcyf46KcAvE-JUIdmcausRd_oceYFAZ0GgE,93208
openai/resources/beta/threads/threads.py,sha256=cityWw78dV4wcZ5_PoZ8-xvxcnigHULqgLmLv2RK1yk,103550
openai/resources/beta/vector_stores/__init__.py,sha256=11Xn1vhgndWiI0defJHv31vmbtbDgh2GwZT3gX8GgHk,1296
openai/resources/beta/vector_stores/__pycache__/__init__.cpython-38.pyc,,
openai/resources/beta/vector_stores/__pycache__/file_batches.cpython-38.pyc,,
@@ -195,10 +169,10 @@ openai/resources/chat/__pycache__/__init__.cpython-38.pyc,,
openai/resources/chat/__pycache__/chat.cpython-38.pyc,,
openai/resources/chat/__pycache__/completions.cpython-38.pyc,,
openai/resources/chat/chat.py,sha256=Edexhbq1anfSS_I0wNRQb7rx1OV6-rq4sxgVlYDGb6Y,2342
openai/resources/chat/completions.py,sha256=BmiebxGg6AcRMyz31svm6SZK2lgv18H9oxiSwY2Cp-M,78489
openai/resources/chat/completions.py,sha256=pYUx7M9MpajLSp8tHooNtKrAuA3qeLFGxI-LGRngeh8,74845
openai/resources/completions.py,sha256=4Rfv9o3XwI5GRfhN1RD4tEgNn0I2jb6TRW6j0b6bpZc,58712
openai/resources/embeddings.py,sha256=cMSXtMc_7mBqlSiQ99B7qXYoRLGyoeIFazyYQ0jJ1O4,10755
openai/resources/files.py,sha256=xJAKyPfmnxO2Jn5Rx-swxggY23VFcdM8ZxDT7ZjeJAM,27092
openai/resources/files.py,sha256=InC0e28vgMKM5pNadUhGGvDQ0Rvb1D4WcEDfanFJV2E,27156
openai/resources/fine_tuning/__init__.py,sha256=s6uoq7gM4gwoywdOOZQkPeYiSbUl-OwpeuMhwJJk0lc,837
openai/resources/fine_tuning/__pycache__/__init__.cpython-38.pyc,,
openai/resources/fine_tuning/__pycache__/fine_tuning.cpython-38.pyc,,
@@ -208,19 +182,12 @@ openai/resources/fine_tuning/jobs/__pycache__/__init__.cpython-38.pyc,,
openai/resources/fine_tuning/jobs/__pycache__/checkpoints.cpython-38.pyc,,
openai/resources/fine_tuning/jobs/__pycache__/jobs.cpython-38.pyc,,
openai/resources/fine_tuning/jobs/checkpoints.py,sha256=6uP1CCGkzE_n8FsVdTQ36eH_eiq24wOxQQ5zzOy0UEU,6456
openai/resources/fine_tuning/jobs/jobs.py,sha256=6GOXP6GTRlmYeDaVA_FK0QDioudPtty7qNH4uo-_j58,27502
openai/resources/images.py,sha256=1oKEnEalLjSxnyOe-Zzk3CsL9ou876ePUgOh8vtUc3I,24616
openai/resources/fine_tuning/jobs/jobs.py,sha256=Nf9SUSi5nXlcPSNIIstU7gS_j6YqbImFxQ01PN4TnF0,27474
openai/resources/images.py,sha256=1mgpeNX-3gcghNEaYiLII_5672aCyH64C7puTgAm_Tw,24660
openai/resources/models.py,sha256=XF3E56V62YZq-HrStUDDvfrT2RHj98P8Y-oOrPSPRX0,10222
openai/resources/moderations.py,sha256=Jc6m5wsaWcqx9ls8HORqwqKZ-a7xy4mSpD3R3J-n7nc,6603
openai/resources/uploads/__init__.py,sha256=HmY3WQgvUI2bN3CjfWHWQOk7UUC6Ozna97_lHhrrRSA,810
openai/resources/uploads/__pycache__/__init__.cpython-38.pyc,,
openai/resources/uploads/__pycache__/parts.cpython-38.pyc,,
openai/resources/uploads/__pycache__/uploads.cpython-38.pyc,,
openai/resources/uploads/parts.py,sha256=8xBjww6Na7qx6QVTG-lyuKzgF1gs7ldP_VwnwMFFjd8,7132
openai/resources/uploads/uploads.py,sha256=9VGy4T9_aWCpmzTjQ24Q3UiFadop6Q23qWcWuTK5f6Q,23900
openai/types/__init__.py,sha256=lglnC6n1TCz8ElBWzsJNmYZtl1eLnHFhM0l-vXOQZXQ,2624
openai/resources/moderations.py,sha256=WPMrXyYXxFXHFyyF_xzg_1Uj3Xtb3KbxsnJm3SQYgcA,6685
openai/types/__init__.py,sha256=KWciGl8_OEKoOmVP3_wHo26Qs3FMwRUl91P8CZXcHZY,2051
openai/types/__pycache__/__init__.cpython-38.pyc,,
openai/types/__pycache__/audio_model.cpython-38.pyc,,
openai/types/__pycache__/batch.cpython-38.pyc,,
openai/types/__pycache__/batch_create_params.cpython-38.pyc,,
openai/types/__pycache__/batch_error.cpython-38.pyc,,
@@ -239,50 +206,43 @@ openai/types/__pycache__/file_create_params.cpython-38.pyc,,
openai/types/__pycache__/file_deleted.cpython-38.pyc,,
openai/types/__pycache__/file_list_params.cpython-38.pyc,,
openai/types/__pycache__/file_object.cpython-38.pyc,,
openai/types/__pycache__/file_purpose.cpython-38.pyc,,
openai/types/__pycache__/image.cpython-38.pyc,,
openai/types/__pycache__/image_create_variation_params.cpython-38.pyc,,
openai/types/__pycache__/image_edit_params.cpython-38.pyc,,
openai/types/__pycache__/image_generate_params.cpython-38.pyc,,
openai/types/__pycache__/image_model.cpython-38.pyc,,
openai/types/__pycache__/images_response.cpython-38.pyc,,
openai/types/__pycache__/model.cpython-38.pyc,,
openai/types/__pycache__/model_deleted.cpython-38.pyc,,
openai/types/__pycache__/moderation.cpython-38.pyc,,
openai/types/__pycache__/moderation_create_params.cpython-38.pyc,,
openai/types/__pycache__/moderation_create_response.cpython-38.pyc,,
openai/types/__pycache__/moderation_model.cpython-38.pyc,,
openai/types/__pycache__/upload.cpython-38.pyc,,
openai/types/__pycache__/upload_complete_params.cpython-38.pyc,,
openai/types/__pycache__/upload_create_params.cpython-38.pyc,,
openai/types/audio/__init__.py,sha256=7PRM0dwBGCBKcF_hkDHx2tVUdEW5jbwU8bFB9Vbtq-g,547
openai/types/audio/__init__.py,sha256=slwR2gZwYMmTpPihbr1a2rryQuyfqeAGzgjluQwlmN4,494
openai/types/audio/__pycache__/__init__.cpython-38.pyc,,
openai/types/audio/__pycache__/speech_create_params.cpython-38.pyc,,
openai/types/audio/__pycache__/speech_model.cpython-38.pyc,,
openai/types/audio/__pycache__/transcription.cpython-38.pyc,,
openai/types/audio/__pycache__/transcription_create_params.cpython-38.pyc,,
openai/types/audio/__pycache__/translation.cpython-38.pyc,,
openai/types/audio/__pycache__/translation_create_params.cpython-38.pyc,,
openai/types/audio/speech_create_params.py,sha256=Q7EqgD5F5CV0tANvz30msMfYD4EgqGUZn4V4yDypSe4,1300
openai/types/audio/speech_model.py,sha256=RUimvc__LYAxwEEmfrf-lj18O3EWrU1OlWZXEXN2AKY,218
openai/types/audio/transcription.py,sha256=FP9QMwwwdqgvP3xY9P-40gBiFmMwFKxXM5yv5x8xPVk,230
openai/types/audio/transcription_create_params.py,sha256=5tx0yAERDRZTG0IEsHQODKxMGZKrdRXGo5K2Is0gNw0,2253
openai/types/audio/translation.py,sha256=5l-Zk9Cg7AZti-TTn2-4ydsoZj2zdvDwyzzVjVp9W0g,194
openai/types/audio/translation_create_params.py,sha256=GT1rk1U7nqInbyahHxBXX8uSjmXGCySiIhI53DYgpa4,1524
openai/types/audio_model.py,sha256=pxBVwf1HGd6mW-_jd-TDVMRZtTvvCUn_rL8Pt1BXzuo,208
openai/types/audio/speech_create_params.py,sha256=uae8hceXzm75E3QXBC9dRMunYA2Mj2m7lUiG_fbuN70,1278
openai/types/audio/transcription.py,sha256=jP13KGV0ZSgK3FkIZueDLrH4Yhafp5FkXBEP85deBAo,231
openai/types/audio/transcription_create_params.py,sha256=H7LOzb4VHwhF_cm0MXMIDgfglmbu-T-gcrp1i2HJBqI,2226
openai/types/audio/translation.py,sha256=_PhTtQ-s1yc-4kAKlgc88FTqUpXnNYfM2ld5IuRRGkA,195
openai/types/audio/translation_create_params.py,sha256=pynqbAozfcVwu1U6C6xvauZSFlQxIz1cswSXJLfRI30,1506
openai/types/batch.py,sha256=eIOIaJnDuv93fdefTI0WRfTm7MZH8gLBdF0B12JCiZw,2787
openai/types/batch_create_params.py,sha256=Wq-uHe9FcAPTtN68jEG2xMZWwOC8Q7Dg4GdxV_y5qP0,1441
openai/types/batch_error.py,sha256=Xxl-gYm0jerpYyI-mKSSVxRMQRubkoLUiOP9U3v72EM,622
openai/types/batch_list_params.py,sha256=X1_sfRspuIMSDyXWVh0YnJ9vJLeOOH66TrvgEHueC84,705
openai/types/batch_request_counts.py,sha256=GHHrJKdJwJ3foBa1j9v5Vece_zzkdXXXgOcne8W1E30,409
openai/types/beta/__init__.py,sha256=5ojZzNm9el-L9sXfh0E8D2t7N55jmuK_GMEkx9Yn09s,2723
openai/types/batch_request_counts.py,sha256=nOzdL84OlZRycVNW99EDkdjCFqqKh68emaWT4Lx7dBE,410
openai/types/beta/__init__.py,sha256=z2VmuulluQs5lVF22u2-FvbTQLpVhtz6hEcM1iUAXZc,2919
openai/types/beta/__pycache__/__init__.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_create_params.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_deleted.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_list_params.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_response_format.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_response_format_option.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_response_format_option_param.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_response_format_param.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_stream_event.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_tool.cpython-38.pyc,,
openai/types/beta/__pycache__/assistant_tool_choice.cpython-38.pyc,,
@@ -309,36 +269,38 @@ openai/types/beta/__pycache__/vector_store_create_params.cpython-38.pyc,,
openai/types/beta/__pycache__/vector_store_deleted.cpython-38.pyc,,
openai/types/beta/__pycache__/vector_store_list_params.cpython-38.pyc,,
openai/types/beta/__pycache__/vector_store_update_params.cpython-38.pyc,,
openai/types/beta/assistant.py,sha256=hn9R5DZ_WSwHRkFAX5biZGC48rMK8ZOf4tSn4J70AAs,4950
openai/types/beta/assistant_create_params.py,sha256=Av6Cbm37NNMQyhhdhilmbFbrj329sipMiUYGcaFBlx0,7267
openai/types/beta/assistant.py,sha256=m5bgNTyelK6MA1RUrdyLg2yTalyR0Xm67K6iBOqlwSk,4674
openai/types/beta/assistant_create_params.py,sha256=ITYisGRS6ZZRfN2TPm9Blv_PyNFakNYD6ZzyxuMJUvY,7695
openai/types/beta/assistant_deleted.py,sha256=bTTUl5FPHTBI5nRm7d0sGuR9VCSBDZ-IbOn9G_IpmJQ,301
openai/types/beta/assistant_list_params.py,sha256=1-osjSX8tKieHSP0xaKBBU8j-J01fKrrxIJRHDudFHk,1220
openai/types/beta/assistant_response_format_option.py,sha256=yNeoAWxM-_8Sjmwqu8exqyKRFhVZIKeTypetPY55VFA,561
openai/types/beta/assistant_response_format_option_param.py,sha256=dyPMhwRSLBZ0ltpxiD7KM-9X6BzWnbGeG-nT_3SenuQ,628
openai/types/beta/assistant_stream_event.py,sha256=ORGXB7viddEHvK4Nb40wqVJylWLgkwVXH7qlyYG9nQE,6829
openai/types/beta/assistant_tool.py,sha256=_0FC7Db4Ctq_0yLaKJ93zNTB5HthuJWEAHx3fadDRlw,506
openai/types/beta/assistant_response_format.py,sha256=-JYxEihoHEHMak9E7KiyD5Zh_f3c-155j110mBDTFNE,378
openai/types/beta/assistant_response_format_option.py,sha256=pDRz-lm-ASYhVIslXCulGAtO0c9Ulr6zVz-VltQQOh4,348
openai/types/beta/assistant_response_format_option_param.py,sha256=JSH4wXdfgQBLMUagfVCn3clk9eErAUAiyZSQZ2XM-2w,410
openai/types/beta/assistant_response_format_param.py,sha256=qtkwEg3hG3_ewmHH3E1hXsQqVqyMSTIOGFN9R1WTW0g,369
openai/types/beta/assistant_stream_event.py,sha256=DjXs0bYypHHhRkrxl2YIjmFApeM3mXggPfEoNSIOvhw,6807
openai/types/beta/assistant_tool.py,sha256=ci9elhBtBQY3_0FefsDuKxyLLRrl5m9e_PSvShZqTSo,478
openai/types/beta/assistant_tool_choice.py,sha256=Hy4HIfPQCkWD8VruHHicuTkomNwljGHviQHk36prKhg,544
openai/types/beta/assistant_tool_choice_function.py,sha256=aYMlVrZdX2JxmehDlyGALRK2PIEkO7VFEfsvY3VH6T4,270
openai/types/beta/assistant_tool_choice_function.py,sha256=lMEPJrd2nIeNeTFTRKj8OTJmS--Zvu6kmzqjFR_iBlQ,271
openai/types/beta/assistant_tool_choice_function_param.py,sha256=-O38277LhSaqOVhTp0haHP0ZnVTLpEBvcLJa5MRo7wE,355
openai/types/beta/assistant_tool_choice_option.py,sha256=jrXMd_IYIQ1pt8Lkc-KrPd4CR3lR8sFV4m7_lpG8A4Y,362
openai/types/beta/assistant_tool_choice_option_param.py,sha256=VcatO5Nej9e5eqfrwetG4uM1vFoewnBEcFz47IxAK2E,424
openai/types/beta/assistant_tool_choice_option.py,sha256=WaLj1FSgQyLrss5hoKbmb19C0hzD5_WP3bWgzNdZIMM,340
openai/types/beta/assistant_tool_choice_option_param.py,sha256=ODCix7ElFxtyABiL09OhaYbQy9RjICCSmILeqBFWeLE,402
openai/types/beta/assistant_tool_choice_param.py,sha256=NOWx9SzZEwYaHeAyFZTQlG3pmogMNXzjPJDGQUlbv7Q,572
openai/types/beta/assistant_tool_param.py,sha256=6DcaU3nMjurur2VkVIYcCaRAY1QLQscXXjCd0ZHHGho,501
openai/types/beta/assistant_update_params.py,sha256=6Eo_HUAJdAwRo7X-zzp4z8PVs9glPS-UV_EWO7aqZL8,4698
openai/types/beta/assistant_tool_param.py,sha256=xsB-Vq93uyS69m5zMoAc7keLXB_OSwEUH6XgB2g3ex4,450
openai/types/beta/assistant_update_params.py,sha256=Z4MA4GtxZzV3a6PlUShoDmDHAIwo7AyVk9O5wUnFhe8,4422
openai/types/beta/chat/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122
openai/types/beta/chat/__pycache__/__init__.cpython-38.pyc,,
openai/types/beta/code_interpreter_tool.py,sha256=7mgQc9OtD_ZUnZeNhoobMFcmmvtZPFCNYGB-PEnNnfs,333
openai/types/beta/code_interpreter_tool_param.py,sha256=X6mwzFyZx1RCKEYbBCPs4kh_tZkxFxydPMK4yFNJkLs,389
openai/types/beta/file_search_tool.py,sha256=6OH6Vt9rV9Ym4U4G61PP6UdnfE-lMWg_HzBFW6bQBNc,974
openai/types/beta/file_search_tool_param.py,sha256=iDUCeoUJ1uLvUI9rDwerXwDOULw9_bnCZgjXwfWlDi0,981
openai/types/beta/file_search_tool.py,sha256=BQCGKQaut845ThBWoqNrKGAp7v4VkXmB3ifgUc3RIJI,973
openai/types/beta/file_search_tool_param.py,sha256=7OmrLkfz2If8JFRQSZ5CsLFQNaF8wNctD_KCVtZwsCs,980
openai/types/beta/function_tool.py,sha256=oYGJfcfPpUohKw2ikgshDjOI1HXCK-5pAWyegYNezeU,397
openai/types/beta/function_tool_param.py,sha256=hCclpGO4Re-TxiGy_QxX75g1kcN6_ElubicO6SdJ_YI,471
openai/types/beta/function_tool_param.py,sha256=T_k2OX1OULgkrHHXw0rY_J-O0y5qA0lM-B58C64YyfM,453
openai/types/beta/thread.py,sha256=wd00j3ogUpOa_O0Sf1m6H4f8t1Nf05DKWiK_4m33O6s,2013
openai/types/beta/thread_create_and_run_params.py,sha256=ZvuTJnslOhwzzEy946ICkftcopChwylK_KKLEHF3I3w,14477
openai/types/beta/thread_create_params.py,sha256=0MlmA3nacpq1k7I-gxKvMes0Yo683grly1St4qUlOpQ,6215
openai/types/beta/thread_create_and_run_params.py,sha256=mR1yoiK0wfhVwxmm97oTmK_UjIS48Tdmvj1VJUveBro,14781
openai/types/beta/thread_create_params.py,sha256=oHJmUGjBln-SpwxB4RPoAtlcpEtLiPv1raSYZq33CG8,6182
openai/types/beta/thread_deleted.py,sha256=MaYG_jZIjSiB9h_ZBiTtpMsRSwFKkCY83ziM5GO_oUk,292
openai/types/beta/thread_update_params.py,sha256=RYsR88YHwReKLiLqnLlnWiReiVIGlEGvVV9-g_wptgM,1750
openai/types/beta/threads/__init__.py,sha256=0WsJo0tXp08CgayozR7Tqc3b8sqzotWzvBun19CEIWc,3066
openai/types/beta/threads/__init__.py,sha256=4zYanJwF_8IISSdkYNWKCm1nxgfZ9qZMfD_k1RMdXwE,2916
openai/types/beta/threads/__pycache__/__init__.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/annotation.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/annotation_delta.cpython-38.pyc,,
@ -368,8 +330,6 @@ openai/types/beta/threads/__pycache__/message_delta.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/message_delta_event.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/message_list_params.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/message_update_params.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/refusal_content_block.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/refusal_delta_block.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/required_action_function_tool_call.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/run.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/run_create_params.cpython-38.pyc,,
@ -382,8 +342,8 @@ openai/types/beta/threads/__pycache__/text_content_block.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/text_content_block_param.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/text_delta.cpython-38.pyc,,
openai/types/beta/threads/__pycache__/text_delta_block.cpython-38.pyc,,
openai/types/beta/threads/annotation.py,sha256=Ce3Y0mSodmYRkoqyhtyIdep6WfWew6KJJgtrENOnfek,462
openai/types/beta/threads/annotation_delta.py,sha256=iNsE-1Gn1yU0TlTHoxqKbOvPRUxWuXsF72qY_mMnWGY,510
openai/types/beta/threads/annotation.py,sha256=3VHiyDhcR2G-cQ48_itBsXDWlmfpUJ7rnjeMh_DsHgg,440
openai/types/beta/threads/annotation_delta.py,sha256=aJ1A_paDRkRVivuCZrmOL4QRvVW3KmZxsGUgOJ7uzUU,488
openai/types/beta/threads/file_citation_annotation.py,sha256=0Rs1Sr-eCLQpLsu8-WwHG7kv5Ihud4kiHO1NL7xHO0s,595
openai/types/beta/threads/file_citation_delta_annotation.py,sha256=R87tcXkJ0RiH5UJo0Qknwk7X_c4qF1qvGsu2spOPx-I,873
openai/types/beta/threads/file_path_annotation.py,sha256=hNc4ebprJynqMG1yk0gLvgzTpjtVzgEbXriMZftkgew,552
@ -400,23 +360,21 @@ openai/types/beta/threads/image_url_content_block_param.py,sha256=RWzo5KkBiwvgJS
openai/types/beta/threads/image_url_delta.py,sha256=MXCp-OmuNT4njbWA9DWAbocP7pD3VpdcUy2wgeOjwm4,582
openai/types/beta/threads/image_url_delta_block.py,sha256=Jjdfub4g9ceNKF8GuuTIghOmYba2vEeX3320mg5PWIA,484
openai/types/beta/threads/image_url_param.py,sha256=VRLaxZf-wxnvAOcKGwyF_o6KEvwktBfE3B6KmYE5LZo,602
openai/types/beta/threads/message.py,sha256=r3Lj8coZqEoz-2nT11yivi7K25CcUskbNSo4x7sWXPg,3294
openai/types/beta/threads/message_content.py,sha256=b8IC_EG28hcXk28z09EABfJwPkYZ7U-lTp_9ykdoxvU,630
openai/types/beta/threads/message_content_delta.py,sha256=o4Edlx9BtdH2Z4OMwGWWXex8wiijknNRihJ-wu8PDUQ,615
openai/types/beta/threads/message_content_part_param.py,sha256=RXrnoDP2-UMQHoR2jJvaT3JHrCeffLi6WzXzH05cDGI,550
openai/types/beta/threads/message_create_params.py,sha256=Qs7Gxs8ZKwzk_7ZhJOwj4KiHkidSmy5qc_Dam0P8F5E,1956
openai/types/beta/threads/message.py,sha256=3GCF3xAWFDFBr92_Ri9vr5Z1MlOndMwc65uM43En5So,3272
openai/types/beta/threads/message_content.py,sha256=s0ltWCTTauDJmY4hALMAAlWQ-6HyzBXmU1vOpDFLaL0,526
openai/types/beta/threads/message_content_delta.py,sha256=8cLprwacM_0Z8SCOPfB6HWohwn4MUA_94VJhSsqfmNE,518
openai/types/beta/threads/message_content_part_param.py,sha256=mJRYCkWPTuZFjqApydfp9ra5AIQFxQDb0EsR_wziD9c,499
openai/types/beta/threads/message_create_params.py,sha256=Ez8AuNZBGXRJHgbsGfj9Xbojh09P77nq6AEZkWNj51E,1934
openai/types/beta/threads/message_deleted.py,sha256=DNnrSfGZ3kWEazmo4mVTdLhiKlIHxs-D8Ef5sNdHY1o,303
openai/types/beta/threads/message_delta.py,sha256=-kaRyvnIA8Yr2QV5jKRn15BU2Ni068a_WtWJ4PqlLfE,570
openai/types/beta/threads/message_delta_event.py,sha256=7SpE4Dd3Lrc_cm97SzBwZzGGhfLqiFViDeTRQz-5YmQ,579
openai/types/beta/threads/message_list_params.py,sha256=LXqc3deSkKO6VN337OlQ4fzG7dfgBE7Iv_CLzZHhbhw,1294
openai/types/beta/threads/message_update_params.py,sha256=bw6_U-vZA4c9_CDmeGOh7IEPIm8BU3BBOKtxnii0LKA,629
openai/types/beta/threads/refusal_content_block.py,sha256=qB9jrS2Wv9UQ7XXaIVKe62dTAU1WOnN3qenR_E43mhg,310
openai/types/beta/threads/refusal_delta_block.py,sha256=ZhgFC8KqA9LIwo_CQIX-w3VVg3Vj0h71xC1Hh1bwmnU,423
openai/types/beta/threads/required_action_function_tool_call.py,sha256=XsR4OBbxI-RWteLvhcLEDBan6eUUGvhLORFRKjPbsLg,888
openai/types/beta/threads/run.py,sha256=_yc01xJzML8ekoB5mX9HGxlJUJmPaiukg0rpuQAK3Rc,8211
openai/types/beta/threads/run_create_params.py,sha256=4wxYnpiZfeUfkW79Y3_qLJgqEHnY2b1_SXvyLwaD_FQ,9163
openai/types/beta/threads/run.py,sha256=Xy7KraC5Au8zWkR7Dq3Z4oZaon5aFoUXaTezB3MbAR0,7935
openai/types/beta/threads/run_create_params.py,sha256=CTvrFijerbPPM1RBejusA09nQW152lO8rtcy3JKNhi0,9488
openai/types/beta/threads/run_list_params.py,sha256=73poqeRcb5TEsIVn7OzJ_g9OajNokEzpCVLzVNKZmPk,1208
openai/types/beta/threads/run_status.py,sha256=OU1hzoyYXaRJ3lupX4YcZ-HZkTpctNE4tzAcp6X8Q9U,351
openai/types/beta/threads/run_status.py,sha256=ky3dh-uD5OhuQB7e4BMQjRXvIDOUJnecTKGXr_PNcFY,329
openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=aDrg0FZZoJKaPVQzcFjUg4ZKaeW8KF6UJBxhJEIjC2I,1630
openai/types/beta/threads/run_update_params.py,sha256=76dWMNa3zCUliemCdwWv6p07GNeMYCdZoJs9KNbdZSE,621
openai/types/beta/threads/runs/__init__.py,sha256=uhxk5F1_5c5wg2_p70AjlOy9cE3Ga8-ILn4Ep-gcls4,1515
@ -441,20 +399,20 @@ openai/types/beta/threads/runs/__pycache__/tool_call_delta_object.cpython-38.pyc
openai/types/beta/threads/runs/__pycache__/tool_calls_step_details.cpython-38.pyc,,
openai/types/beta/threads/runs/code_interpreter_logs.py,sha256=7wXZpUE9I-oZJ0K3mFG0Nwmfm2bKGiSpWJyBeo7txwo,482
openai/types/beta/threads/runs/code_interpreter_output_image.py,sha256=8o99k0ZHMHpqH0taXkOkYR9WaDUpCN-G0Ifd5XsJpb8,613
openai/types/beta/threads/runs/code_interpreter_tool_call.py,sha256=ekiIuH1kVCN51hCzY3AYr5i3_a4vlgUiZHJ59pl17oY,1810
openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py,sha256=Qr2cen-bKyXTW2NDEUHnmJRE0jY-nkLcnO4NzCbBPDo,1479
openai/types/beta/threads/runs/code_interpreter_tool_call.py,sha256=Ydsi3ob7fyv1MqPY6tlZCD254Cc5XNLO-ddEGtKdqj4,1788
openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py,sha256=eD-tvfFD7arq4w7dzQJFkmHrvLguVrDjpAJRNH6EwIE,1457
openai/types/beta/threads/runs/file_search_tool_call.py,sha256=PPxrJP3r4RWFTeE5mU-9SbFz37JmKHOGfsxlZGydyW0,522
openai/types/beta/threads/runs/file_search_tool_call_delta.py,sha256=Gx8c7GSgGYuOvGadcAr3ZIspEFMZS3e2OY7vBo_MYnM,655
openai/types/beta/threads/runs/function_tool_call.py,sha256=aOq5yOtKOi6C5Q1FIQRxqtJJR1AcSW_K5PvRiKISNCI,920
openai/types/beta/threads/runs/function_tool_call_delta.py,sha256=VFRtCJkj4PHX97upM1cXpJAk9-JvJSgyngie06fBIjQ,1076
openai/types/beta/threads/runs/message_creation_step_details.py,sha256=tRFMNF2Rf4DekVliUKkoujItiOjjAE9EG9bbxJvpVPA,506
openai/types/beta/threads/runs/run_step.py,sha256=NA3QgTsa646h3LYV13K3CdpsEQHaaD1QN2zT7STo6zo,3468
openai/types/beta/threads/runs/run_step_delta.py,sha256=FNYDTddRrTO3PT_fgi7AsJ1PeMtyWsVzcxoihjbBzAw,663
openai/types/beta/threads/runs/run_step.py,sha256=UvPakztDIofP8K80Q1gfQSXF18xxp2w9KWRwrcHhjnE,3440
openai/types/beta/threads/runs/run_step_delta.py,sha256=lNPH43tdQMHHEiaxaS0FtLXsqtH5xOJpYJlAroj7PHg,635
openai/types/beta/threads/runs/run_step_delta_event.py,sha256=rkDyvHSXt-hc1LngB41f9vglkn6t03kS62bsn0iGaxU,585
openai/types/beta/threads/runs/run_step_delta_message_delta.py,sha256=UIo6oPH8STLjPHiWL-A4CtKfYe49uptvIAHWNnZ3Ums,564
openai/types/beta/threads/runs/step_list_params.py,sha256=2vMPFMElvK135ncP9ch6kUnzPGOSIPT3Eio18jJhAqk,1250
openai/types/beta/threads/runs/tool_call.py,sha256=1rwq4IbLgjQAQ-ORXYkNpmJyi9SREDnqA57nJbj_NiU,537
openai/types/beta/threads/runs/tool_call_delta.py,sha256=t5wF8ndW3z99lHF981FL-IN5xXBS9p7eonH9bxvKu_c,600
openai/types/beta/threads/runs/tool_call.py,sha256=zyck1JNKBPCIGCMrJN6P850D10Y36FO6LwrX2WM_YR8,515
openai/types/beta/threads/runs/tool_call_delta.py,sha256=OZeU5fF-77_oG87xNVn_wZo4SpDfjJ5ND9rIQQYKPoE,578
openai/types/beta/threads/runs/tool_call_delta_object.py,sha256=eK20VsIswEyT48XbkGu60HUrE7OD3fhpn1fbXrVauM4,615
openai/types/beta/threads/runs/tool_calls_step_details.py,sha256=bDa-yybVF3a8H6VqhDGmFZMkpn-0gtPQM2jWWsmUvYo,574
openai/types/beta/threads/text.py,sha256=9gjmDCqoptnxQ8Jhym87pECyd6m1lB3daCxKNzSFp4Y,319
@ -463,7 +421,7 @@ openai/types/beta/threads/text_content_block_param.py,sha256=feQr0muF845tc1q3FJr
openai/types/beta/threads/text_delta.py,sha256=2EFeQCkg_cc8nYEJ6BtYAA3_TqgMTbmEXoMvLjzaB34,389
openai/types/beta/threads/text_delta_block.py,sha256=pkHkVBgNsmHi9JURzs5ayPqxQXSkex3F0jH0MqJXik0,448
openai/types/beta/vector_store.py,sha256=zaSaSUpStD3iuyas9f7VQCNF1byxnXRz_5q36eizNGE,2353
openai/types/beta/vector_store_create_params.py,sha256=Q-pD0r2gg0PpTQdrltvQ-YdG8dDF48i8EoBsr0PRPWM,2509
openai/types/beta/vector_store_create_params.py,sha256=xR7d4Nb5Gka9U01jP4YI3BkaQKOOvQK0-Nm2cYlAFHI,2487
openai/types/beta/vector_store_deleted.py,sha256=Yq0E1orRLShseLwZ1deiBdDEUgEw_tcYVxGYa5gbIrM,308
openai/types/beta/vector_store_list_params.py,sha256=8iUgSgs_TeehprKjtTLWOGeH_R8LbDdLkdwMq9xVpSA,1224
openai/types/beta/vector_store_update_params.py,sha256=AHlOV4f36UWAH4k7XKlGa51Mfao2f7339qI3fskWbIk,1114
@ -476,21 +434,20 @@ openai/types/beta/vector_stores/__pycache__/file_list_params.cpython-38.pyc,,
openai/types/beta/vector_stores/__pycache__/vector_store_file.cpython-38.pyc,,
openai/types/beta/vector_stores/__pycache__/vector_store_file_batch.cpython-38.pyc,,
openai/types/beta/vector_stores/__pycache__/vector_store_file_deleted.cpython-38.pyc,,
openai/types/beta/vector_stores/file_batch_create_params.py,sha256=c3Syo18qBuM1NYOopZN0CLigi864uo9jyFa4W6lH4i4,1922
openai/types/beta/vector_stores/file_batch_create_params.py,sha256=SxbCh73KOTAAAV4YpkyuUbswn1qjFCRbxpRiwI-Dwhg,1900
openai/types/beta/vector_stores/file_batch_list_files_params.py,sha256=6c_KvnlFV0vkFid_thhyEK6HC6F1ixbDh2roExL_-qk,1449
openai/types/beta/vector_stores/file_create_params.py,sha256=6gCvIuEgvaPIIGS8IHwWQVTfwPgDN2Mt5zrDZY9z-4M,1890
openai/types/beta/vector_stores/file_create_params.py,sha256=WxmFQirKmZ9DwtBeAssPkpyhG4xO7vtHC6Ublr8FqY8,1868
openai/types/beta/vector_stores/file_list_params.py,sha256=UC6NzZQ79tInL8xV3pMm66IFWsIT9PW_BhSbQLm4ar4,1383
openai/types/beta/vector_stores/vector_store_file.py,sha256=TxefqQwU1vrJdINGdv91EQjyGZ-5eBqjAcUJAPvVETM,2730
openai/types/beta/vector_stores/vector_store_file.py,sha256=0OY_Vt3oSYOayGWvlaYYnsLr2dlCBoJ2TMNTXLcXaGc,2726
openai/types/beta/vector_stores/vector_store_file_batch.py,sha256=ubvj8z95EOdRGAp0rgI94g5uFQx0ob8hLgwOWHKda4E,1457
openai/types/beta/vector_stores/vector_store_file_deleted.py,sha256=37J7oL2WYCgOd7Rhg2jX6IavaZT63vgUf3u6LC6C3Hs,322
openai/types/chat/__init__.py,sha256=epD7g5z--KfkvxuhuvFS1uXFrlrV3djgoR-ORTYkbjI,3050
openai/types/chat/__init__.py,sha256=i9KSiVYkzHjyIw2UijlmMiZS6Oky5JxaW5teonqW-T8,2583
openai/types/chat/__pycache__/__init__.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_assistant_message_param.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_chunk.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_content_part_image_param.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_content_part_param.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_content_part_text_param.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_function_call_option_param.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_function_message_param.cpython-38.pyc,,
@ -508,47 +465,41 @@ openai/types/chat/__pycache__/chat_completion_tool_message_param.cpython-38.pyc,
openai/types/chat/__pycache__/chat_completion_tool_param.cpython-38.pyc,,
openai/types/chat/__pycache__/chat_completion_user_message_param.cpython-38.pyc,,
openai/types/chat/__pycache__/completion_create_params.cpython-38.pyc,,
openai/types/chat/__pycache__/parsed_chat_completion.cpython-38.pyc,,
openai/types/chat/__pycache__/parsed_function_tool_call.cpython-38.pyc,,
openai/types/chat/chat_completion.py,sha256=MaTVOMwtbzqGyHgyP4DP41ESEDKhv_XOM8L_fx3uoQE,2689
openai/types/chat/chat_completion_assistant_message_param.py,sha256=0m5WjA97DuxiGGvUyJQnlkf1SqLEr2Ce-kUTBvtLbBc,2114
openai/types/chat/chat_completion_chunk.py,sha256=aQXFY4gq9YEIrr7YBM68D5XyWGT9kKo0JO8n-55IjEA,5032
openai/types/chat/chat_completion.py,sha256=3WPQ6Mx0-C3Pj_f26zpAEw1TNPHqHCXm3tGZEjdg-bk,2548
openai/types/chat/chat_completion_assistant_message_param.py,sha256=RxDU9AWIjVpVvYfhdVnuiGjVLPxjRMnwEDBrPrMNlT0,1648
openai/types/chat/chat_completion_chunk.py,sha256=8kmNOrRW93uy8cSN0mOmjLtdVxhItK_PMJ_m3lgB-R0,4802
openai/types/chat/chat_completion_content_part_image_param.py,sha256=ODHcWpe8TIXZQHXHhEEacrRHm_TCaFWZnml-bD85XiU,797
openai/types/chat/chat_completion_content_part_param.py,sha256=8hoTnNqerHjaHGMFU8CvhjVbH8yChXEYxs3jLWKfod8,543
openai/types/chat/chat_completion_content_part_refusal_param.py,sha256=TV1vu-IgrvKa5IBlPSIdBxUaW8g1zDhMOOBOEmhU2w0,467
openai/types/chat/chat_completion_content_part_param.py,sha256=XGzw9ocldPg6Ke3ykNRuoxfORAAPtWXe4_SP1iURTDc,486
openai/types/chat/chat_completion_content_part_text_param.py,sha256=4IpiXMKM9AuTyop5PRptPBbBhh9s93xy2vjg4Yw6NIw,429
openai/types/chat/chat_completion_function_call_option_param.py,sha256=M-IqWHyBLkvYBcwFxxp4ydCIxbPDaMlNl4bik9UoFd4,365
openai/types/chat/chat_completion_function_message_param.py,sha256=jIaZbBHHbt4v4xHCIyvYtYLst_X4jOznRjYNcTf0MF0,591
openai/types/chat/chat_completion_message.py,sha256=CYVebAMTUfREmvkykqXSNE6tGzEJu1QzClZ_ZgFD73s,1371
openai/types/chat/chat_completion_message_param.py,sha256=RFer4ZYXxVed9F0ulkqi0xNy_eOhp63Y-0oN24dhVBI,889
openai/types/chat/chat_completion_message.py,sha256=19e2EL6cHZA6EeOVPgI_LbN3UwNLKizhtxuXnxLzhX0,1282
openai/types/chat/chat_completion_message_param.py,sha256=RGdT7OjJPQTd2M0drDVNxBkUB-9DHMkQjNolaOY9nw0,838
openai/types/chat/chat_completion_message_tool_call.py,sha256=XlIe2vhSYvrt8o8Yol5AQqnacI1xHqpEIV26G4oNrZY,900
openai/types/chat/chat_completion_message_tool_call_param.py,sha256=XNhuUpGr5qwVTo0K8YavJwleHYSdwN_urK51eKlqC24,1009
openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=JsxfSJYpOmF7zIreQ0JrXRSLp07OGCBSycRRcF6OZmg,569
openai/types/chat/chat_completion_role.py,sha256=Rdzg4deI1uZmqgkwnMrLHvbV2fPRqKcHLQrVmKVk9Dw,262
openai/types/chat/chat_completion_role.py,sha256=F5BlM6FMrJmqtCx3-W-KjhXXrVYAWv87_alwF7fOTSM,240
openai/types/chat/chat_completion_stream_options_param.py,sha256=7-R2mYh7dbtX9qDOL3UkeyVH6FNWC_4aTCLtHYObMbs,628
openai/types/chat/chat_completion_system_message_param.py,sha256=WYtzmsNP8ZI3Ie8cd-oU7RuNoaBF6-bBR3mOzST9hMw,815
openai/types/chat/chat_completion_system_message_param.py,sha256=qWEJupmzMuUa82V7OoLeQF92SKE1QoU4cXfX2o43x9E,638
openai/types/chat/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769
openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=ef71WSM9HMQhIQUocRgVJUVW-bSRwK2_1NjFSB5TPiI,472
openai/types/chat/chat_completion_tool_message_param.py,sha256=5K7jfKpwTuKNi1PTFabq_LHH-7wun8CUsLDh90U8zQE,730
openai/types/chat/chat_completion_tool_param.py,sha256=J9r2TAWygkIBDInWEKx29gBE0wiCgc7HpXFyQhxSkAU,503
openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=cGMIgf6e5KG1xbP1_dg-S_ktD78ECkDAPFekFBHH0PU,444
openai/types/chat/chat_completion_tool_message_param.py,sha256=B-PST-J1VwPjaKLpzpmqfEsHlr5Owb54dnQoIhbvuY4,553
openai/types/chat/chat_completion_tool_param.py,sha256=sve2G1DayUs-1CMzXK1x104r8KTa5K62CZdxoyLmFlk,485
openai/types/chat/chat_completion_user_message_param.py,sha256=mik-MRkwb543C5FSJ52LtTkeA2E_HdLUgtoHEdO73XQ,792
openai/types/chat/completion_create_params.py,sha256=3fWYkmZ5oXoxgATDlxnryd-4a3xRFaPBJjnGiPjsUJQ,11681
openai/types/chat/parsed_chat_completion.py,sha256=KwcwCtj0yexl6gB7yuOnyETRW-uUvNRYbVzPMkwCe5Q,1437
openai/types/chat/parsed_function_tool_call.py,sha256=hJzcKOpzf1tnXC6RGbPhaeCawq8EFdnLK_MfRITkW1U,920
openai/types/chat_model.py,sha256=2-hBzw3AZXZ6E5qe7b9_BGTQhiPVoW9RdKHZ-Rrnm3o,755
openai/types/chat/completion_create_params.py,sha256=bJZFVdyvmGAYUpidbsmrmwyC5--XND09y14M3VrNilY,10984
openai/types/chat_model.py,sha256=LKD1BRCwkfWnPAZ2ER-3jxykKxrNG1vyGXxaGPYHw4g,659
openai/types/completion.py,sha256=yuYVEVkJcMVUINNLglkxOJqCx097HKCYFeJun3Js73A,1172
openai/types/completion_choice.py,sha256=PUk77T3Cp34UJSXoMfSzTKGWDK0rQQwq84X_PSlOUJo,965
openai/types/completion_create_params.py,sha256=mEyR068kk36ZylY4d1K3sqnucpUz9fAqEyoEwmW3DtQ,7567
openai/types/completion_usage.py,sha256=9m5PmCyfVy93ZfIszGpcG1gdcEaSr22HJgaTJ_ImVrs,435
openai/types/completion_usage.py,sha256=MIa0LipVCM88I9h71aXF_aVkNVt47iTa74gDtWvDDTA,436
openai/types/create_embedding_response.py,sha256=lTAu_Pym76kFljDnnDRoDB2GNQSzWmwwlqf5ff7FNPM,798
openai/types/embedding.py,sha256=2pV6RTSf5UV6E86Xeud5ZwmjQjMS93m_4LrQ0GN3fho,637
openai/types/embedding_create_params.py,sha256=3p7U8i2uG1SCpELbn_IeDMLkFe-vv7cyB5dx-_4U8iU,1885
openai/types/file_content.py,sha256=qLlM4J8kgu1BfrtlmYftPsQVCJu4VqYeiS1T28u8EQ8,184
openai/types/file_create_params.py,sha256=N1I3rER1se27usx46fhkvdtn-blJ6Y9ECT7Wwzve37Q,913
openai/types/file_content.py,sha256=E2CsQejO19KSjdShjg5nsCtS4BbBwhPVDSfFEUA8ZNM,133
openai/types/file_create_params.py,sha256=f8-xfoAlZPl5FuOz0h5sJTdAoBuJEIXVz_iyL9iTCbg,926
openai/types/file_deleted.py,sha256=H_r9U7XthT5xHAo_4ay1EGGkc21eURt8MkkIBRYiQcw,277
openai/types/file_list_params.py,sha256=VhZbSrCO0fYnUTgPE_nuBy-3A5MjpXiBtI-BahAc5SY,310
openai/types/file_object.py,sha256=ESuRYCTLbDtHxyuhzybKTF_TztIcq_F7TzCTQ6JToE0,1309
openai/types/file_purpose.py,sha256=o1TzR-41XsNsQ0791GTGPe3DLkU9FEODucKdP6Q6sPc,243
openai/types/fine_tuning/__init__.py,sha256=SZvjq_22oY9E4zcnrvVd0ul9U4sk_IBeOd0MsNALu5s,806
openai/types/fine_tuning/__pycache__/__init__.cpython-38.pyc,,
openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-38.pyc,,
@ -561,10 +512,10 @@ openai/types/fine_tuning/__pycache__/job_list_events_params.cpython-38.pyc,,
openai/types/fine_tuning/__pycache__/job_list_params.cpython-38.pyc,,
openai/types/fine_tuning/fine_tuning_job.py,sha256=YOcsIJZPPAqOnQudOkS_Am-peQuHyyvcMWVDxFvJdEA,3861
openai/types/fine_tuning/fine_tuning_job_event.py,sha256=oCkO0yImLZnZQLeU4GH6YyUlDG25pzs41SCWWB-sd_o,374
openai/types/fine_tuning/fine_tuning_job_integration.py,sha256=c3Uy7RMVJ32Xlat-6s9eG-5vZLl4w66COXc0B3pWk4g,242
openai/types/fine_tuning/fine_tuning_job_integration.py,sha256=YZI3gQSE9zhfAcghYryzoug_IPfdog_fsjf2eCIMzD8,243
openai/types/fine_tuning/fine_tuning_job_wandb_integration.py,sha256=YnBeiz14UuhUSpnD0KBj5V143qLvJbDIMcUVWOCBLXY,1026
openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py,sha256=7vEc2uEV2c_DENBjhq0Qy5X8B-rzxsKvGECjnvF1Wdw,804
openai/types/fine_tuning/job_create_params.py,sha256=Qhclk88QYCmmeBsPzfKBjNHHR5juW6-sUcRZDoSQH94,4693
openai/types/fine_tuning/job_create_params.py,sha256=cN7dPHAEylB4X_ZX2BudlLoYf9mhEG48DSmspVsjVV8,4679
openai/types/fine_tuning/job_list_events_params.py,sha256=4xOED4H2ky2mI9sIDytjmfJz5bNAdNWb70WIb_0bBWs,400
openai/types/fine_tuning/job_list_params.py,sha256=yjxaEnESVTRpJ9ItvjKq30KcD_xz_trqKMIxG2eAriE,396
openai/types/fine_tuning/jobs/__init__.py,sha256=nuWhOUsmsoVKTKMU35kknmr8sfpTF-kkIzyuOlRbJj0,295
@ -574,50 +525,27 @@ openai/types/fine_tuning/jobs/__pycache__/fine_tuning_job_checkpoint.cpython-38.
openai/types/fine_tuning/jobs/checkpoint_list_params.py,sha256=XoDLkkKCWmf5an5rnoVEpNK8mtQHq1fHw9EqmezfrXM,415
openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py,sha256=Z_sUhebJY9nWSssZU7QoOJwe5sez76sCAuVeSO63XhY,1347
openai/types/image.py,sha256=9No-8GHesOUbjchemY1jqtMwh_s22oBmLVFlLn2KoQo,607
openai/types/image_create_variation_params.py,sha256=9FuF7N6Ju7BusvbQnMY5ddqHN_YInHkUlqaiVstxwYs,1477
openai/types/image_edit_params.py,sha256=LvbWaTXVG_yneNnnpkNAocImIhqR-0jaHrRDlj7Tl2I,1837
openai/types/image_generate_params.py,sha256=S1aA2trSzhLl2OXaFHtQiuJz6P7F_IIzPIswbvUYCjU,2132
openai/types/image_model.py,sha256=W4YchkhJT2wZdlNDUpVkEKg8zdDDfp9S3oTf4D8Wr8g,219
openai/types/image_create_variation_params.py,sha256=3f0qYfKrSuYA2gv7lyCq0FsRM36QctZ_Ki2YPLeNNj4,1450
openai/types/image_edit_params.py,sha256=oQIiKqlU_59H1f0HtBlQw_BJ7mBEXRispfoGuDnfXHI,1810
openai/types/image_generate_params.py,sha256=YztuD1oHepGqmP-m78Uhay67IgwGk7CspdAn2YWihlw,2116
openai/types/images_response.py,sha256=EJ4qxYZ8CPGh2SZdRsyw6I0FnUvlgwxwc4NgPovJrvk,274
openai/types/model.py,sha256=DMw8KwQx8B6S6sAI038D0xdzkmYdY5-r0oMhCUG4l6w,532
openai/types/model_deleted.py,sha256=tXZybg03DunoOSYvwhT7zKj7KTN42R0VEs_-3PRliMo,229
openai/types/model_deleted.py,sha256=rDGU-Ul4lMfNf5XxKNxZKo9CQPGsrkrzqnhl00GLMi4,230
openai/types/moderation.py,sha256=ihR2jzld_BfOaHW1_6A2csTInEaJvAl5nPxuh_jegY4,3933
openai/types/moderation_create_params.py,sha256=TADBGDorBDzcTzkylSB2eoN4cvRmZ0ADN00DzPdI1IA,948
openai/types/moderation_create_params.py,sha256=Rz8kzoisqPihOLdPjrSchM0uml5VPHV8DqcrE56rwUs,954
openai/types/moderation_create_response.py,sha256=e6SVfWX2_JX25Za0C6KojcnbMTtDB2A7cjUm6cFMKcs,484
openai/types/moderation_model.py,sha256=zak2cYrNYevj0TItwsa2inX8NhQS0rUJ2Duhsbl7PxU,257
openai/types/shared/__init__.py,sha256=34RJ2IUXj0f3B73a6rqeHILu8AH5-sC8npTbEx_bnk8,551
openai/types/shared/__init__.py,sha256=eoiCHGKeY1_YjOn41M8QxvIUI_M68Ltsr1d67g_Pr-I,288
openai/types/shared/__pycache__/__init__.cpython-38.pyc,,
openai/types/shared/__pycache__/error_object.cpython-38.pyc,,
openai/types/shared/__pycache__/function_definition.cpython-38.pyc,,
openai/types/shared/__pycache__/function_parameters.cpython-38.pyc,,
openai/types/shared/__pycache__/response_format_json_object.cpython-38.pyc,,
openai/types/shared/__pycache__/response_format_json_schema.cpython-38.pyc,,
openai/types/shared/__pycache__/response_format_text.cpython-38.pyc,,
openai/types/shared/error_object.py,sha256=G7SGPZ9Qw3gewTKbi3fK69eM6L2Ur0C2D57N8iEapJA,305
openai/types/shared/function_definition.py,sha256=8a5uHoIKrkrwTgfwTyE9ly4PgsZ3iLA_yRUAjubTb7Y,1447
openai/types/shared/function_parameters.py,sha256=Dkc_pm98zCKyouQmYrl934cK8ZWX7heY_IIyunW8x7c,236
openai/types/shared/response_format_json_object.py,sha256=15KTCXJ0o1W4c5V1vAcOQAx-u0eoIfAjxrHLoN3NuE4,344
openai/types/shared/response_format_json_schema.py,sha256=rZS7diOPeqK48O_R6OYMJ6AtSGy_88PKTxzha6_56Fo,1399
openai/types/shared/response_format_text.py,sha256=GX0u_40OLmDdSyawDrUcUk4jcrz1qWsKmmAMP4AD7hc,318
openai/types/shared_params/__init__.py,sha256=GcNBmK_EPlGE-xPFmSQjlOq7SuNYd2nwDswX4ExHwoU,498
openai/types/shared/function_definition.py,sha256=xnS4fQNu1EQ6LLv8HR5iSa5OVBP6SrpJpnPi2vg4Oig,1047
openai/types/shared/function_parameters.py,sha256=jhabBaJFMgWfFduqmKQ0dkKfK5DWlwgde30SlZVcCYc,185
openai/types/shared_params/__init__.py,sha256=Jaw3mmmUB3Ky7vL1fzsh-8kAJEbeYxcQ0JOy7p765Xo,235
openai/types/shared_params/__pycache__/__init__.cpython-38.pyc,,
openai/types/shared_params/__pycache__/function_definition.cpython-38.pyc,,
openai/types/shared_params/__pycache__/function_parameters.cpython-38.pyc,,
openai/types/shared_params/__pycache__/response_format_json_object.cpython-38.pyc,,
openai/types/shared_params/__pycache__/response_format_json_schema.cpython-38.pyc,,
openai/types/shared_params/__pycache__/response_format_text.cpython-38.pyc,,
openai/types/shared_params/function_definition.py,sha256=ciMXqn1tFXnp1tg9weJW0uvtyvMLrnph3WXMg4IG1Vk,1482
openai/types/shared_params/function_parameters.py,sha256=UvxKz_3b9b5ECwWr8RFrIH511htbU2JZsp9Z9BMkF-o,272
openai/types/shared_params/response_format_json_object.py,sha256=QT4uJCK7RzN3HK17eGjEo36jLKOIBBNGjiX-zIa9iT4,390
openai/types/shared_params/response_format_json_schema.py,sha256=Uu2ioeSbI64bm-jJ61OY8Lr3PpofTR4d2LNBcaYxlec,1360
openai/types/shared_params/response_format_text.py,sha256=SjHeZAfgM1-HXAoKLrkiH-VZEnQ73XPTk_RgtJmEbU4,364
openai/types/upload.py,sha256=mEeQTGS0uqFkxbDpJzgBUvuDhGVPw9cQxhRJjPBVeLo,1186
openai/types/upload_complete_params.py,sha256=7On-iVAlA9p_nksLSFPBPR4QbB0xEtAW-skyh7S9gR0,504
openai/types/upload_create_params.py,sha256=ZiZr1yC6g2VqL7KEnw7lhE4kZvU-F3DfTAc2TPk-XBo,889
openai/types/uploads/__init__.py,sha256=fDsmd3L0nIWbFldbViOLvcQavsFA4SL3jsXDfAueAck,242
openai/types/uploads/__pycache__/__init__.cpython-38.pyc,,
openai/types/uploads/__pycache__/part_create_params.cpython-38.pyc,,
openai/types/uploads/__pycache__/upload_part.cpython-38.pyc,,
openai/types/uploads/part_create_params.py,sha256=pBByUzngaj70ov1knoSo_gpeBjaWP9D5EdiHwiG4G7U,362
openai/types/uploads/upload_part.py,sha256=U9953cr9lJJLWEfhTiwHphRzLKARq3gWAWqrjxbhTR4,590
openai/types/shared_params/function_definition.py,sha256=t-oOPy4Z1Gl-d7O9BejE5BjEScwZ9bkQtuRihFg5Dmw,1058
openai/types/shared_params/function_parameters.py,sha256=vqZAZwPBh14Ykp84NFTXF_j0eoDyqF9V_d8-_n-KF9w,221
openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62
@ -26,10 +26,8 @@ from ._exceptions import (
AuthenticationError,
InternalServerError,
PermissionDeniedError,
LengthFinishReasonError,
UnprocessableEntityError,
APIResponseValidationError,
ContentFilterFinishReasonError,
)
from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
from ._utils._logs import setup_logging as _setup_logging
@ -57,8 +55,6 @@ __all__ = [
"UnprocessableEntityError",
"RateLimitError",
"InternalServerError",
"LengthFinishReasonError",
"ContentFilterFinishReasonError",
"Timeout",
"RequestOptions",
"Client",
@ -76,7 +72,7 @@ __all__ = [
"DefaultAsyncHttpxClient",
]

from .lib import azure as _azure, pydantic_function_tool as pydantic_function_tool
from .lib import azure as _azure
from .version import VERSION as VERSION
from .lib.azure import AzureOpenAI as AzureOpenAI, AsyncAzureOpenAI as AsyncAzureOpenAI
from .lib._old_api import *
@ -1,6 +1,5 @@
from __future__ import annotations

import sys
import json
import time
import uuid
@ -126,14 +125,16 @@ class PageInfo:
self,
*,
url: URL,
) -> None: ...
) -> None:
...

@overload
def __init__(
self,
*,
params: Query,
) -> None: ...
) -> None:
...

def __init__(
self,
@ -166,7 +167,8 @@ class BasePage(GenericModel, Generic[_T]):
return False
return self.next_page_info() is not None

def next_page_info(self) -> Optional[PageInfo]: ...
def next_page_info(self) -> Optional[PageInfo]:
...

def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body]
...
@ -902,7 +904,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
*,
stream: Literal[True],
stream_cls: Type[_StreamT],
) -> _StreamT: ...
) -> _StreamT:
...

@overload
def request(
@ -912,7 +915,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
remaining_retries: Optional[int] = None,
*,
stream: Literal[False] = False,
) -> ResponseT: ...
) -> ResponseT:
...

@overload
def request(
@ -923,7 +927,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
*,
stream: bool = False,
stream_cls: Type[_StreamT] | None = None,
) -> ResponseT | _StreamT: ...
) -> ResponseT | _StreamT:
...

def request(
self,
@ -1046,7 +1051,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=options.get_max_retries(self.max_retries) - retries,
)

def _retry_request(
@ -1088,7 +1092,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
@ -1100,7 +1103,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@ -1120,7 +1122,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@ -1134,7 +1135,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
@ -1167,7 +1167,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT: ...
) -> ResponseT:
...

@overload
def get(
@ -1178,7 +1179,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_StreamT],
) -> _StreamT: ...
) -> _StreamT:
...

@overload
def get(
@ -1189,7 +1191,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
options: RequestOptions = {},
stream: bool,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT: ...
) -> ResponseT | _StreamT:
...

def get(
self,
@ -1215,7 +1218,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[False] = False,
) -> ResponseT: ...
) -> ResponseT:
...

@overload
def post(
@ -1228,7 +1232,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
files: RequestFiles | None = None,
stream: Literal[True],
stream_cls: type[_StreamT],
) -> _StreamT: ...
) -> _StreamT:
...

@overload
def post(
@ -1241,7 +1246,8 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
files: RequestFiles | None = None,
stream: bool,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT: ...
) -> ResponseT | _StreamT:
...

def post(
self,
@ -1474,7 +1480,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
*,
stream: Literal[False] = False,
remaining_retries: Optional[int] = None,
) -> ResponseT: ...
) -> ResponseT:
...

@overload
async def request(
@ -1485,7 +1492,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
remaining_retries: Optional[int] = None,
) -> _AsyncStreamT: ...
) -> _AsyncStreamT:
...

@overload
async def request(
@ -1496,7 +1504,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
remaining_retries: Optional[int] = None,
) -> ResponseT | _AsyncStreamT: ...
) -> ResponseT | _AsyncStreamT:
...

async def request(
self,
@ -1616,7 +1625,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=options.get_max_retries(self.max_retries) - retries,
)

async def _retry_request(
@ -1656,7 +1664,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
@ -1668,7 +1675,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@ -1688,7 +1694,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@ -1702,7 +1707,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
@ -1725,7 +1729,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT: ...
) -> ResponseT:
...

@overload
async def get(
@ -1736,7 +1741,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT: ...
) -> _AsyncStreamT:
...

@overload
async def get(
@ -1747,7 +1753,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT: ...
) -> ResponseT | _AsyncStreamT:
...

async def get(
self,
@ -1771,7 +1778,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT: ...
) -> ResponseT:
...

@overload
async def post(
@ -1784,7 +1792,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT: ...
) -> _AsyncStreamT:
...

@overload
async def post(
@ -1797,7 +1806,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT: ...
) -> ResponseT | _AsyncStreamT:
...

async def post(
self,
@ -2013,6 +2023,7 @@ def get_python_version() -> str:

def get_architecture() -> Arch:
try:
python_bitness, _ = platform.architecture()
machine = platform.machine().lower()
except Exception:
return "unknown"
@ -2028,7 +2039,7 @@ def get_architecture() -> Arch:
return "x64"

# TODO: untested
if sys.maxsize <= 2**32:
if python_bitness == "32bit":
return "x32"

if machine:
@ -58,7 +58,6 @@ class OpenAI(SyncAPIClient):
fine_tuning: resources.FineTuning
beta: resources.Beta
batches: resources.Batches
uploads: resources.Uploads
with_raw_response: OpenAIWithRawResponse
with_streaming_response: OpenAIWithStreamedResponse

@ -144,14 +143,13 @@ class OpenAI(SyncAPIClient):
self.fine_tuning = resources.FineTuning(self)
self.beta = resources.Beta(self)
self.batches = resources.Batches(self)
self.uploads = resources.Uploads(self)
self.with_raw_response = OpenAIWithRawResponse(self)
self.with_streaming_response = OpenAIWithStreamedResponse(self)

@property
@override
def qs(self) -> Querystring:
return Querystring(array_format="brackets")
return Querystring(array_format="comma")

@property
@override
@ -272,7 +270,6 @@ class AsyncOpenAI(AsyncAPIClient):
fine_tuning: resources.AsyncFineTuning
beta: resources.AsyncBeta
batches: resources.AsyncBatches
uploads: resources.AsyncUploads
with_raw_response: AsyncOpenAIWithRawResponse
with_streaming_response: AsyncOpenAIWithStreamedResponse

@ -358,14 +355,13 @@ class AsyncOpenAI(AsyncAPIClient):
self.fine_tuning = resources.AsyncFineTuning(self)
self.beta = resources.AsyncBeta(self)
self.batches = resources.AsyncBatches(self)
self.uploads = resources.AsyncUploads(self)
self.with_raw_response = AsyncOpenAIWithRawResponse(self)
self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self)

@property
@override
def qs(self) -> Querystring:
return Querystring(array_format="brackets")
return Querystring(array_format="comma")

@property
@override
@ -487,7 +483,6 @@ class OpenAIWithRawResponse:
self.fine_tuning = resources.FineTuningWithRawResponse(client.fine_tuning)
self.beta = resources.BetaWithRawResponse(client.beta)
self.batches = resources.BatchesWithRawResponse(client.batches)
self.uploads = resources.UploadsWithRawResponse(client.uploads)


class AsyncOpenAIWithRawResponse:
@ -503,7 +498,6 @@ class AsyncOpenAIWithRawResponse:
self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning)
self.beta = resources.AsyncBetaWithRawResponse(client.beta)
self.batches = resources.AsyncBatchesWithRawResponse(client.batches)
self.uploads = resources.AsyncUploadsWithRawResponse(client.uploads)


class OpenAIWithStreamedResponse:
@ -519,7 +513,6 @@ class OpenAIWithStreamedResponse:
self.fine_tuning = resources.FineTuningWithStreamingResponse(client.fine_tuning)
self.beta = resources.BetaWithStreamingResponse(client.beta)
self.batches = resources.BatchesWithStreamingResponse(client.batches)
self.uploads = resources.UploadsWithStreamingResponse(client.uploads)


class AsyncOpenAIWithStreamedResponse:
@ -535,7 +528,6 @@ class AsyncOpenAIWithStreamedResponse:
self.fine_tuning = resources.AsyncFineTuningWithStreamingResponse(client.fine_tuning)
self.beta = resources.AsyncBetaWithStreamingResponse(client.beta)
self.batches = resources.AsyncBatchesWithStreamingResponse(client.batches)
self.uploads = resources.AsyncUploadsWithStreamingResponse(client.uploads)


Client = OpenAI
@ -7,7 +7,7 @@ from typing_extensions import Self
import pydantic
from pydantic.fields import FieldInfo

from ._types import IncEx, StrBytesIntFloat
from ._types import StrBytesIntFloat

_T = TypeVar("_T")
_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
@ -133,20 +133,17 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
def model_dump(
model: pydantic.BaseModel,
*,
exclude: IncEx = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
) -> dict[str, Any]:
if PYDANTIC_V2:
return model.model_dump(
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
)
return cast(
"dict[str, Any]",
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
),
@ -159,34 +156,25 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
return model.parse_obj(data) # pyright: ignore[reportDeprecated]


def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT:
if PYDANTIC_V2:
return model.model_validate_json(data)
return model.parse_raw(data) # pyright: ignore[reportDeprecated]


def model_json_schema(model: type[_ModelT]) -> dict[str, Any]:
if PYDANTIC_V2:
return model.model_json_schema()
return model.schema() # pyright: ignore[reportDeprecated]


# generic models
if TYPE_CHECKING:

class GenericModel(pydantic.BaseModel): ...
class GenericModel(pydantic.BaseModel):
...

else:
if PYDANTIC_V2:
# there no longer needs to be a distinction in v2 but
# we still have to create our own subclass to avoid
# inconsistent MRO ordering errors
class GenericModel(pydantic.BaseModel): ...
class GenericModel(pydantic.BaseModel):
...

else:
import pydantic.generics

class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel):
...


# cached properties
@ -205,21 +193,26 @@ if TYPE_CHECKING:
func: Callable[[Any], _T]
attrname: str | None

def __init__(self, func: Callable[[Any], _T]) -> None: ...
def __init__(self, func: Callable[[Any], _T]) -> None:
...

@overload
def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
def __get__(self, instance: None, owner: type[Any] | None = None) -> Self:
...

@overload
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T:
...

def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
raise NotImplementedError()

def __set_name__(self, owner: type[Any], name: str) -> None: ...
def __set_name__(self, owner: type[Any], name: str) -> None:
...

# __set__ is not defined at runtime, but @cached_property is designed to be settable
def __set__(self, instance: object, value: _T) -> None: ...
def __set__(self, instance: object, value: _T) -> None:
...
else:
try:
from functools import cached_property as cached_property
@ -19,8 +19,6 @@ __all__ = [
"UnprocessableEntityError",
"RateLimitError",
"InternalServerError",
"LengthFinishReasonError",
"ContentFilterFinishReasonError",
]


@ -127,17 +125,3 @@ class RateLimitError(APIStatusError):

class InternalServerError(APIStatusError):
pass


class LengthFinishReasonError(OpenAIError):
def __init__(self) -> None:
super().__init__(
f"Could not parse response content as the length limit was reached",
)


class ContentFilterFinishReasonError(OpenAIError):
def __init__(self) -> None:
super().__init__(
f"Could not parse response content as the request was rejected by the content filter",
)
@ -39,11 +39,13 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:


@overload
def to_httpx_files(files: None) -> None: ...
def to_httpx_files(files: None) -> None:
...


@overload
def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
...


def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
@ -81,11 +83,13 @@ def _read_file_content(file: FileContent) -> HttpxFileContent:


@overload
async def async_to_httpx_files(files: None) -> None: ...
async def async_to_httpx_files(files: None) -> None:
...


@overload
async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
...


async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
@ -5,18 +5,7 @@ import inspect
import logging
import datetime
import functools
from typing import (
TYPE_CHECKING,
Any,
Union,
Generic,
TypeVar,
Callable,
Iterator,
AsyncIterator,
cast,
overload,
)
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, Iterator, AsyncIterator, cast, overload
from typing_extensions import Awaitable, ParamSpec, override, deprecated, get_origin

import anyio
@ -64,9 +53,6 @@ class LegacyAPIResponse(Generic[R]):

http_response: httpx.Response

retries_taken: int
"""The number of retries made. If no retries happened this will be `0`"""

def __init__(
self,
*,
@ -76,7 +62,6 @@ class LegacyAPIResponse(Generic[R]):
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
options: FinalRequestOptions,
retries_taken: int = 0,
) -> None:
self._cast_to = cast_to
self._client = client
@ -85,17 +70,18 @@ class LegacyAPIResponse(Generic[R]):
self._stream_cls = stream_cls
self._options = options
self.http_response = raw
self.retries_taken = retries_taken

@property
def request_id(self) -> str | None:
return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return]

@overload
def parse(self, *, to: type[_T]) -> _T: ...
def parse(self, *, to: type[_T]) -> _T:
...

@overload
def parse(self) -> R: ...
def parse(self) -> R:
...

def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
@ -380,8 +380,6 @@ def is_basemodel(type_: type) -> bool:

def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]:
origin = get_origin(type_) or type_
if not inspect.isclass(origin):
return False
return issubclass(origin, BaseModel) or issubclass(origin, GenericModel)


@ -408,15 +406,6 @@ def build(
return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs))


def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
"""Loose coercion to the expected type with construction of nested values.

Note: the returned value from this function is not guaranteed to match the
given type.
"""
return cast(_T, construct_type(value=value, type_=type_))


def construct_type(*, value: object, type_: object) -> object:
"""Loose coercion to the expected type with construction of nested values.

@ -55,9 +55,6 @@ class BaseAPIResponse(Generic[R]):

http_response: httpx.Response

retries_taken: int
"""The number of retries made. If no retries happened this will be `0`"""

def __init__(
self,
*,
@ -67,7 +64,6 @@ class BaseAPIResponse(Generic[R]):
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
options: FinalRequestOptions,
retries_taken: int = 0,
) -> None:
self._cast_to = cast_to
self._client = client
@ -76,7 +72,6 @@ class BaseAPIResponse(Generic[R]):
self._stream_cls = stream_cls
self._options = options
self.http_response = raw
self.retries_taken = retries_taken

@property
def headers(self) -> httpx.Headers:
@ -268,10 +263,12 @@ class APIResponse(BaseAPIResponse[R]):
return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return]

@overload
def parse(self, *, to: type[_T]) -> _T: ...
def parse(self, *, to: type[_T]) -> _T:
...

@overload
def parse(self) -> R: ...
def parse(self) -> R:
...

def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
@ -374,10 +371,12 @@ class AsyncAPIResponse(BaseAPIResponse[R]):
return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return]

@overload
async def parse(self, *, to: type[_T]) -> _T: ...
async def parse(self, *, to: type[_T]) -> _T:
...

@overload
async def parse(self) -> R: ...
async def parse(self) -> R:
...

async def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
@ -112,7 +112,8 @@ class NotGiven:
For example:

```py
def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
...


get(timeout=1) # 1s timeout
@ -162,14 +163,16 @@ class ModelBuilderProtocol(Protocol):
*,
response: Response,
data: object,
) -> _T: ...
) -> _T:
...


Headers = Mapping[str, Union[str, Omit]]


class HeadersLikeProtocol(Protocol):
def get(self, __key: str) -> str | None: ...
def get(self, __key: str) -> str | None:
...


HeadersLike = Union[Headers, HeadersLikeProtocol]
@ -59,4 +59,5 @@ class LazyProxy(Generic[T], ABC):
return cast(T, self)

@abstractmethod
def __load__(self) -> T: ...
def __load__(self) -> T:
...
@ -34,7 +34,7 @@ def assert_signatures_in_sync(

if custom_param.annotation != source_param.annotation:
errors.append(
f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}"
f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(source_param.annotation)}"
)
continue

@ -211,17 +211,20 @@ def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
Example usage:
```py
@overload
def foo(*, a: str) -> str: ...
def foo(*, a: str) -> str:
...


@overload
def foo(*, b: bool) -> str: ...
def foo(*, b: bool) -> str:
...


# This enforces the same constraints that a static type checker would
# i.e. that either a or b must be passed to the function
@required_args(["a"], ["b"])
def foo(*, a: str | None = None, b: bool | None = None) -> str: ...
def foo(*, a: str | None = None, b: bool | None = None) -> str:
...
```
"""

@ -283,15 +286,18 @@ _V = TypeVar("_V")


@overload
def strip_not_given(obj: None) -> None: ...
def strip_not_given(obj: None) -> None:
...


@overload
def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ...
def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]:
...


@overload
def strip_not_given(obj: object) -> object: ...
def strip_not_given(obj: object) -> object:
...


def strip_not_given(obj: object | None) -> object:
@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "1.42.0"  # x-release-please-version
__version__ = "1.35.15"  # x-release-please-version
@ -1,6 +1,5 @@
from __future__ import annotations

import sys
from typing import TYPE_CHECKING, Any, Optional, cast
from argparse import ArgumentParser
@ -8,7 +7,6 @@ from .._utils import get_client, print_model
from ..._types import NOT_GIVEN
from .._models import BaseModel
from .._progress import BufferReader
from ...types.audio import Transcription

if TYPE_CHECKING:
    from argparse import _SubParsersAction
@ -67,42 +65,30 @@ class CLIAudio:
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        model = cast(
            "Transcription | str",
            get_client().audio.transcriptions.create(
                file=(args.file, buffer_reader),
                model=args.model,
                language=args.language or NOT_GIVEN,
                temperature=args.temperature or NOT_GIVEN,
                prompt=args.prompt or NOT_GIVEN,
                # casts required because the API is typed for enums
                # but we don't want to validate that here for forwards-compat
                response_format=cast(Any, args.response_format),
            ),
        model = get_client().audio.transcriptions.create(
            file=(args.file, buffer_reader),
            model=args.model,
            language=args.language or NOT_GIVEN,
            temperature=args.temperature or NOT_GIVEN,
            prompt=args.prompt or NOT_GIVEN,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            response_format=cast(Any, args.response_format),
        )
        if isinstance(model, str):
            sys.stdout.write(model + "\n")
        else:
            print_model(model)
        print_model(model)

    @staticmethod
    def translate(args: CLITranslationArgs) -> None:
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        model = cast(
            "Transcription | str",
            get_client().audio.translations.create(
                file=(args.file, buffer_reader),
                model=args.model,
                temperature=args.temperature or NOT_GIVEN,
                prompt=args.prompt or NOT_GIVEN,
                # casts required because the API is typed for enums
                # but we don't want to validate that here for forwards-compat
                response_format=cast(Any, args.response_format),
            ),
        model = get_client().audio.translations.create(
            file=(args.file, buffer_reader),
            model=args.model,
            temperature=args.temperature or NOT_GIVEN,
            prompt=args.prompt or NOT_GIVEN,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            response_format=cast(Any, args.response_format),
        )
        if isinstance(model, str):
            sys.stdout.write(model + "\n")
        else:
            print_model(model)
        print_model(model)
@ -8,10 +8,12 @@ from ._utils import Colors, organization_info
from .._exceptions import APIError, OpenAIError


class CLIError(OpenAIError): ...
class CLIError(OpenAIError):
    ...


class SilentCLIError(CLIError): ...
class SilentCLIError(CLIError):
    ...


def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
@ -2,6 +2,7 @@ from __future__ import annotations

import os
import sys
import json
import shutil
import tarfile
import platform
@ -84,9 +85,7 @@ def install() -> Path:
    if sys.platform == "win32":
        raise CLIError("Windows is not supported yet in the migration CLI")

    _debug("Using Grit installer from GitHub")

    platform = "apple-darwin" if sys.platform == "darwin" else "unknown-linux-gnu"
    platform = "macos" if sys.platform == "darwin" else "linux"

    dir_name = _cache_dir() / "openai-python"
    install_dir = dir_name / ".install"
@ -110,14 +109,27 @@ def install() -> Path:
    arch = _get_arch()
    _debug(f"Using architecture {arch}")

    file_name = f"marzano-{arch}-{platform}"
    download_url = f"https://github.com/getgrit/gritql/releases/latest/download/{file_name}.tar.gz"
    file_name = f"marzano-{platform}-{arch}"
    meta_url = f"https://api.keygen.sh/v1/accounts/{KEYGEN_ACCOUNT}/artifacts/{file_name}"

    sys.stdout.write(f"Downloading Grit CLI from {download_url}\n")
    sys.stdout.write(f"Retrieving Grit CLI metadata from {meta_url}\n")
    with httpx.Client() as client:
        download_response = client.get(download_url, follow_redirects=True)
        if download_response.status_code != 200:
            raise CLIError(f"Failed to download Grit CLI from {download_url}")
        response = client.get(meta_url)  # pyright: ignore[reportUnknownMemberType]

        data = response.json()
        errors = data.get("errors")
        if errors:
            for error in errors:
                sys.stdout.write(f"{error}\n")

            raise CLIError("Could not locate Grit CLI binary - see above errors")

        write_manifest(install_dir, data["data"]["relationships"]["release"]["data"]["id"])

        link = data["data"]["links"]["redirect"]
        _debug(f"Redirect URL {link}")

        download_response = client.get(link)  # pyright: ignore[reportUnknownMemberType]
        with open(temp_file, "wb") as file:
            for chunk in download_response.iter_bytes():
                file.write(chunk)
@ -131,7 +143,8 @@ def install() -> Path:
    else:
        archive.extractall(unpacked_dir)

    _move_files_recursively(unpacked_dir, target_dir)
    for item in unpacked_dir.iterdir():
        item.rename(target_dir / item.name)

    shutil.rmtree(unpacked_dir)
    os.remove(temp_file)
@ -142,23 +155,30 @@ def install() -> Path:
    return target_path


def _move_files_recursively(source_dir: Path, target_dir: Path) -> None:
    for item in source_dir.iterdir():
        if item.is_file():
            item.rename(target_dir / item.name)
        elif item.is_dir():
            _move_files_recursively(item, target_dir)


def _get_arch() -> str:
    architecture = platform.machine().lower()

    # Map the architecture names to Grit equivalents
    # Map the architecture names to Node.js equivalents
    arch_map = {
        "x86_64": "x86_64",
        "amd64": "x86_64",
        "armv7l": "aarch64",
        "arm64": "aarch64",
        "x86_64": "x64",
        "amd64": "x64",
        "armv7l": "arm",
        "aarch64": "arm64",
    }

    return arch_map.get(architecture, architecture)


def write_manifest(install_path: Path, release: str) -> None:
    manifest = {
        "installPath": str(install_path),
        "binaries": {
            "marzano": {
                "name": "marzano",
                "release": release,
            },
        },
    }
    manifest_path = Path(install_path) / "manifests.json"
    with open(manifest_path, "w") as f:
        json.dump(manifest, f, indent=2)
@ -1,2 +0,0 @@
from ._tools import pydantic_function_tool as pydantic_function_tool
from ._parsing import ResponseFormatT as ResponseFormatT
@ -1,12 +0,0 @@
from ._completions import (
    ResponseFormatT as ResponseFormatT,
    has_parseable_input,
    has_parseable_input as has_parseable_input,
    maybe_parse_content as maybe_parse_content,
    validate_input_tools as validate_input_tools,
    parse_chat_completion as parse_chat_completion,
    get_input_tool_by_name as get_input_tool_by_name,
    solve_response_format_t as solve_response_format_t,
    parse_function_tool_arguments as parse_function_tool_arguments,
    type_to_response_format_param as type_to_response_format_param,
)
@ -1,264 +0,0 @@
from __future__ import annotations

import json
from typing import TYPE_CHECKING, Any, Iterable, cast
from typing_extensions import TypeVar, TypeGuard, assert_never

import pydantic

from .._tools import PydanticFunctionTool
from ..._types import NOT_GIVEN, NotGiven
from ..._utils import is_dict, is_given
from ..._compat import PYDANTIC_V2, model_parse_json
from ..._models import construct_type_unchecked
from .._pydantic import is_basemodel_type, to_strict_json_schema, is_dataclass_like_type
from ...types.chat import (
    ParsedChoice,
    ChatCompletion,
    ParsedFunction,
    ParsedChatCompletion,
    ChatCompletionMessage,
    ParsedFunctionToolCall,
    ChatCompletionToolParam,
    ParsedChatCompletionMessage,
    completion_create_params,
)
from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
from ...types.shared_params import FunctionDefinition
from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
from ...types.chat.chat_completion_message_tool_call import Function

ResponseFormatT = TypeVar(
    "ResponseFormatT",
    # if it isn't given then we don't do any parsing
    default=None,
)
_default_response_format: None = None


def validate_input_tools(
    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
) -> None:
    if not is_given(tools):
        return

    for tool in tools:
        if tool["type"] != "function":
            raise ValueError(
                f'Currently only `function` tool types support auto-parsing; Received `{tool["type"]}`',
            )

        strict = tool["function"].get("strict")
        if strict is not True:
            raise ValueError(
                f'`{tool["function"]["name"]}` is not strict. Only `strict` function tools can be auto-parsed'
            )


def parse_chat_completion(
    *,
    response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | NotGiven,
    input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
    chat_completion: ChatCompletion | ParsedChatCompletion[object],
) -> ParsedChatCompletion[ResponseFormatT]:
    if is_given(input_tools):
        input_tools = [t for t in input_tools]
    else:
        input_tools = []

    choices: list[ParsedChoice[ResponseFormatT]] = []
    for choice in chat_completion.choices:
        if choice.finish_reason == "length":
            raise LengthFinishReasonError()

        if choice.finish_reason == "content_filter":
            raise ContentFilterFinishReasonError()

        message = choice.message

        tool_calls: list[ParsedFunctionToolCall] = []
        if message.tool_calls:
            for tool_call in message.tool_calls:
                if tool_call.type == "function":
                    tool_call_dict = tool_call.to_dict()
                    tool_calls.append(
                        construct_type_unchecked(
                            value={
                                **tool_call_dict,
                                "function": {
                                    **cast(Any, tool_call_dict["function"]),
                                    "parsed_arguments": parse_function_tool_arguments(
                                        input_tools=input_tools, function=tool_call.function
                                    ),
                                },
                            },
                            type_=ParsedFunctionToolCall,
                        )
                    )
                elif TYPE_CHECKING:  # type: ignore[unreachable]
                    assert_never(tool_call)
                else:
                    tool_calls.append(tool_call)

        choices.append(
            construct_type_unchecked(
                type_=cast(Any, ParsedChoice)[solve_response_format_t(response_format)],
                value={
                    **choice.to_dict(),
                    "message": {
                        **message.to_dict(),
                        "parsed": maybe_parse_content(
                            response_format=response_format,
                            message=message,
                        ),
                        "tool_calls": tool_calls,
                    },
                },
            )
        )

    return cast(
        ParsedChatCompletion[ResponseFormatT],
        construct_type_unchecked(
            type_=cast(Any, ParsedChatCompletion)[solve_response_format_t(response_format)],
            value={
                **chat_completion.to_dict(),
                "choices": choices,
            },
        ),
    )


def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None:
    return next((t for t in input_tools if t.get("function", {}).get("name") == name), None)


def parse_function_tool_arguments(
    *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction
) -> object:
    input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name)
    if not input_tool:
        return None

    input_fn = cast(object, input_tool.get("function"))
    if isinstance(input_fn, PydanticFunctionTool):
        return model_parse_json(input_fn.model, function.arguments)

    input_fn = cast(FunctionDefinition, input_fn)

    if not input_fn.get("strict"):
        return None

    return json.loads(function.arguments)


def maybe_parse_content(
    *,
    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
    message: ChatCompletionMessage | ParsedChatCompletionMessage[object],
) -> ResponseFormatT | None:
    if has_rich_response_format(response_format) and message.content is not None and not message.refusal:
        return _parse_content(response_format, message.content)

    return None


def solve_response_format_t(
    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
) -> type[ResponseFormatT]:
    """Return the runtime type for the given response format.

    If no response format is given, or if we won't auto-parse the response format
    then we default to `None`.
    """
    if has_rich_response_format(response_format):
        return response_format

    return cast("type[ResponseFormatT]", _default_response_format)


def has_parseable_input(
    *,
    response_format: type | ResponseFormatParam | NotGiven,
    input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
) -> bool:
    if has_rich_response_format(response_format):
        return True

    for input_tool in input_tools or []:
        if is_parseable_tool(input_tool):
            return True

    return False


def has_rich_response_format(
    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
) -> TypeGuard[type[ResponseFormatT]]:
    if not is_given(response_format):
        return False

    if is_response_format_param(response_format):
        return False

    return True


def is_response_format_param(response_format: object) -> TypeGuard[ResponseFormatParam]:
    return is_dict(response_format)


def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool:
    input_fn = cast(object, input_tool.get("function"))
    if isinstance(input_fn, PydanticFunctionTool):
        return True

    return cast(FunctionDefinition, input_fn).get("strict") or False


def _parse_content(response_format: type[ResponseFormatT], content: str) -> ResponseFormatT:
    if is_basemodel_type(response_format):
        return cast(ResponseFormatT, model_parse_json(response_format, content))

    if is_dataclass_like_type(response_format):
        if not PYDANTIC_V2:
            raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {response_format}")

        return pydantic.TypeAdapter(response_format).validate_json(content)

    raise TypeError(f"Unable to automatically parse response format type {response_format}")


def type_to_response_format_param(
    response_format: type | completion_create_params.ResponseFormat | NotGiven,
) -> ResponseFormatParam | NotGiven:
    if not is_given(response_format):
        return NOT_GIVEN

    if is_response_format_param(response_format):
        return response_format

    # type checkers don't narrow the negation of a `TypeGuard` as it isn't
    # a safe default behaviour but we know that at this point the `response_format`
    # can only be a `type`
    response_format = cast(type, response_format)

    json_schema_type: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any] | None = None

    if is_basemodel_type(response_format):
        name = response_format.__name__
        json_schema_type = response_format
    elif is_dataclass_like_type(response_format):
        name = response_format.__name__
        json_schema_type = pydantic.TypeAdapter(response_format)
    else:
        raise TypeError(f"Unsupported response_format type - {response_format}")

    return {
        "type": "json_schema",
        "json_schema": {
            "schema": to_strict_json_schema(json_schema_type),
            "name": name,
            "strict": True,
        },
    }
@ -1,150 +0,0 @@
from __future__ import annotations

import inspect
from typing import Any, TypeVar
from typing_extensions import TypeGuard

import pydantic

from .._types import NOT_GIVEN
from .._utils import is_dict as _is_dict, is_list
from .._compat import PYDANTIC_V2, model_json_schema

_T = TypeVar("_T")


def to_strict_json_schema(model: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any]) -> dict[str, Any]:
    if inspect.isclass(model) and is_basemodel_type(model):
        schema = model_json_schema(model)
    elif PYDANTIC_V2 and isinstance(model, pydantic.TypeAdapter):
        schema = model.json_schema()
    else:
        raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {model}")

    return _ensure_strict_json_schema(schema, path=(), root=schema)


def _ensure_strict_json_schema(
    json_schema: object,
    *,
    path: tuple[str, ...],
    root: dict[str, object],
) -> dict[str, Any]:
    """Mutates the given JSON schema to ensure it conforms to the `strict` standard
    that the API expects.
    """
    if not is_dict(json_schema):
        raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")

    defs = json_schema.get("$defs")
    if is_dict(defs):
        for def_name, def_schema in defs.items():
            _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name), root=root)

    definitions = json_schema.get("definitions")
    if is_dict(definitions):
        for definition_name, definition_schema in definitions.items():
            _ensure_strict_json_schema(definition_schema, path=(*path, "definitions", definition_name), root=root)

    typ = json_schema.get("type")
    if typ == "object" and "additionalProperties" not in json_schema:
        json_schema["additionalProperties"] = False

    # object types
    # { 'type': 'object', 'properties': { 'a': {...} } }
    properties = json_schema.get("properties")
    if is_dict(properties):
        json_schema["required"] = [prop for prop in properties.keys()]
        json_schema["properties"] = {
            key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key), root=root)
            for key, prop_schema in properties.items()
        }

    # arrays
    # { 'type': 'array', 'items': {...} }
    items = json_schema.get("items")
    if is_dict(items):
        json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"), root=root)

    # unions
    any_of = json_schema.get("anyOf")
    if is_list(any_of):
        json_schema["anyOf"] = [
            _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i)), root=root)
            for i, variant in enumerate(any_of)
        ]

    # intersections
    all_of = json_schema.get("allOf")
    if is_list(all_of):
        if len(all_of) == 1:
            json_schema.update(_ensure_strict_json_schema(all_of[0], path=(*path, "allOf", "0"), root=root))
            json_schema.pop("allOf")
        else:
            json_schema["allOf"] = [
                _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i)), root=root)
                for i, entry in enumerate(all_of)
            ]

    # strip `None` defaults as there's no meaningful distinction here
    # the schema will still be `nullable` and the model will default
    # to using `None` anyway
    if json_schema.get("default", NOT_GIVEN) is None:
        json_schema.pop("default")

    # we can't use `$ref`s if there are also other properties defined, e.g.
    # `{"$ref": "...", "description": "my description"}`
    #
    # so we unravel the ref
    # `{"type": "string", "description": "my description"}`
    ref = json_schema.get("$ref")
    if ref and has_more_than_n_keys(json_schema, 1):
        assert isinstance(ref, str), f"Received non-string $ref - {ref}"

        resolved = resolve_ref(root=root, ref=ref)
        if not is_dict(resolved):
            raise ValueError(f"Expected `$ref: {ref}` to resolved to a dictionary but got {resolved}")

        # properties from the json schema take priority over the ones on the `$ref`
        json_schema.update({**resolved, **json_schema})
        json_schema.pop("$ref")

    return json_schema


def resolve_ref(*, root: dict[str, object], ref: str) -> object:
    if not ref.startswith("#/"):
        raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/")

    path = ref[2:].split("/")
    resolved = root
    for key in path:
        value = resolved[key]
        assert is_dict(value), f"encountered non-dictionary entry while resolving {ref} - {resolved}"
        resolved = value

    return resolved


def is_basemodel_type(typ: type) -> TypeGuard[type[pydantic.BaseModel]]:
    return issubclass(typ, pydantic.BaseModel)


def is_dataclass_like_type(typ: type) -> bool:
    """Returns True if the given type likely used `@pydantic.dataclass`"""
    return hasattr(typ, "__pydantic_config__")


def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
    # just pretend that we know there are only `str` keys
    # as that check is not worth the performance cost
    return _is_dict(obj)


def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:
    i = 0
    for _ in obj.keys():
        i += 1
        if i > n:
            return True
    return False
@ -1,54 +0,0 @@
from __future__ import annotations

from typing import Any, Dict, cast

import pydantic

from ._pydantic import to_strict_json_schema
from ..types.chat import ChatCompletionToolParam
from ..types.shared_params import FunctionDefinition


class PydanticFunctionTool(Dict[str, Any]):
    """Dictionary wrapper so we can pass the given base model
    throughout the entire request stack without having to special
    case it.
    """

    model: type[pydantic.BaseModel]

    def __init__(self, defn: FunctionDefinition, model: type[pydantic.BaseModel]) -> None:
        super().__init__(defn)
        self.model = model

    def cast(self) -> FunctionDefinition:
        return cast(FunctionDefinition, self)


def pydantic_function_tool(
    model: type[pydantic.BaseModel],
    *,
    name: str | None = None,  # inferred from class name by default
    description: str | None = None,  # inferred from class docstring by default
) -> ChatCompletionToolParam:
    if description is None:
        # note: we intentionally don't use `.getdoc()` to avoid
        # including pydantic's docstrings
        description = model.__doc__

    function = PydanticFunctionTool(
        {
            "name": name or model.__name__,
            "strict": True,
            "parameters": to_strict_json_schema(model),
        },
        model,
    ).cast()

    if description is not None:
        function["description"] = description

    return {
        "type": "function",
        "function": function,
    }
@ -80,7 +80,8 @@ class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI):
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.Client | None = None,
        _strict_response_validation: bool = False,
    ) -> None: ...
    ) -> None:
        ...

    @overload
    def __init__(
@ -98,7 +99,8 @@ class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI):
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.Client | None = None,
        _strict_response_validation: bool = False,
    ) -> None: ...
    ) -> None:
        ...

    @overload
    def __init__(
@ -116,7 +118,8 @@ class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI):
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.Client | None = None,
        _strict_response_validation: bool = False,
    ) -> None: ...
    ) -> None:
        ...

    def __init__(
        self,
@ -318,7 +321,8 @@ class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], Asy
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.AsyncClient | None = None,
        _strict_response_validation: bool = False,
    ) -> None: ...
    ) -> None:
        ...

    @overload
    def __init__(
@ -337,7 +341,8 @@ class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], Asy
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.AsyncClient | None = None,
        _strict_response_validation: bool = False,
    ) -> None: ...
    ) -> None:
        ...

    @overload
    def __init__(
@ -356,7 +361,8 @@ class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], Asy
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.AsyncClient | None = None,
        _strict_response_validation: bool = False,
    ) -> None: ...
    ) -> None:
        ...

    def __init__(
        self,
@ -1,64 +0,0 @@
from __future__ import annotations

from ..._utils import is_dict, is_list


def accumulate_delta(acc: dict[object, object], delta: dict[object, object]) -> dict[object, object]:
    for key, delta_value in delta.items():
        if key not in acc:
            acc[key] = delta_value
            continue

        acc_value = acc[key]
        if acc_value is None:
            acc[key] = delta_value
            continue

        # the `index` property is used in arrays of objects so it should
        # not be accumulated like other values e.g.
        # [{'foo': 'bar', 'index': 0}]
        #
        # the same applies to `type` properties as they're used for
        # discriminated unions
        if key == "index" or key == "type":
            acc[key] = delta_value
            continue

        if isinstance(acc_value, str) and isinstance(delta_value, str):
            acc_value += delta_value
        elif isinstance(acc_value, (int, float)) and isinstance(delta_value, (int, float)):
            acc_value += delta_value
        elif is_dict(acc_value) and is_dict(delta_value):
            acc_value = accumulate_delta(acc_value, delta_value)
        elif is_list(acc_value) and is_list(delta_value):
            # for lists of non-dictionary items we'll only ever get new entries
            # in the array, existing entries will never be changed
            if all(isinstance(x, (str, int, float)) for x in acc_value):
                acc_value.extend(delta_value)
                continue

            for delta_entry in delta_value:
                if not is_dict(delta_entry):
                    raise TypeError(f"Unexpected list delta entry is not a dictionary: {delta_entry}")

                try:
                    index = delta_entry["index"]
                except KeyError as exc:
                    raise RuntimeError(f"Expected list delta entry to have an `index` key; {delta_entry}") from exc

                if not isinstance(index, int):
                    raise TypeError(f"Unexpected, list delta entry `index` value is not an integer; {index}")

                try:
                    acc_entry = acc_value[index]
                except IndexError:
                    acc_value.insert(index, delta_entry)
                else:
                    if not is_dict(acc_entry):
                        raise TypeError("not handled yet")

                    acc_value[index] = accumulate_delta(acc_entry, delta_entry)

        acc[key] = acc_value

    return acc
@ -1,26 +0,0 @@
from ._types import (
    ParsedChoiceSnapshot as ParsedChoiceSnapshot,
    ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
    ParsedChatCompletionMessageSnapshot as ParsedChatCompletionMessageSnapshot,
)
from ._events import (
    ChunkEvent as ChunkEvent,
    ContentDoneEvent as ContentDoneEvent,
    RefusalDoneEvent as RefusalDoneEvent,
    ContentDeltaEvent as ContentDeltaEvent,
    RefusalDeltaEvent as RefusalDeltaEvent,
    LogprobsContentDoneEvent as LogprobsContentDoneEvent,
    LogprobsRefusalDoneEvent as LogprobsRefusalDoneEvent,
    ChatCompletionStreamEvent as ChatCompletionStreamEvent,
    LogprobsContentDeltaEvent as LogprobsContentDeltaEvent,
    LogprobsRefusalDeltaEvent as LogprobsRefusalDeltaEvent,
    ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
    FunctionToolCallArgumentsDoneEvent as FunctionToolCallArgumentsDoneEvent,
    FunctionToolCallArgumentsDeltaEvent as FunctionToolCallArgumentsDeltaEvent,
)
from ._completions import (
    ChatCompletionStream as ChatCompletionStream,
    AsyncChatCompletionStream as AsyncChatCompletionStream,
    ChatCompletionStreamManager as ChatCompletionStreamManager,
    AsyncChatCompletionStreamManager as AsyncChatCompletionStreamManager,
)
@ -1,724 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
from types import TracebackType
|
||||
from typing import TYPE_CHECKING, Any, Generic, Callable, Iterable, Awaitable, AsyncIterator, cast
|
||||
from typing_extensions import Self, Iterator, assert_never
|
||||
|
||||
from jiter import from_json
|
||||
|
||||
from ._types import ParsedChoiceSnapshot, ParsedChatCompletionSnapshot, ParsedChatCompletionMessageSnapshot
|
||||
from ._events import (
|
||||
ChunkEvent,
|
||||
ContentDoneEvent,
|
||||
RefusalDoneEvent,
|
||||
ContentDeltaEvent,
|
||||
RefusalDeltaEvent,
|
||||
LogprobsContentDoneEvent,
|
||||
LogprobsRefusalDoneEvent,
|
||||
ChatCompletionStreamEvent,
|
||||
LogprobsContentDeltaEvent,
|
||||
LogprobsRefusalDeltaEvent,
|
||||
FunctionToolCallArgumentsDoneEvent,
|
||||
FunctionToolCallArgumentsDeltaEvent,
|
||||
)
|
||||
from .._deltas import accumulate_delta
|
||||
from ...._types import NOT_GIVEN, NotGiven
|
||||
from ...._utils import is_given, consume_sync_iterator, consume_async_iterator
|
||||
from ...._compat import model_dump
|
||||
from ...._models import build, construct_type
|
||||
from ..._parsing import (
|
||||
ResponseFormatT,
|
||||
has_parseable_input,
|
||||
maybe_parse_content,
|
||||
parse_chat_completion,
|
||||
get_input_tool_by_name,
|
||||
solve_response_format_t,
|
||||
parse_function_tool_arguments,
|
||||
)
|
||||
from ...._streaming import Stream, AsyncStream
|
||||
from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam
|
||||
from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
|
||||
from ....types.chat.chat_completion import ChoiceLogprobs
|
||||
from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk
|
||||
from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
|
||||
|
||||
|
||||
class ChatCompletionStream(Generic[ResponseFormatT]):
|
||||
"""Wrapper over the Chat Completions streaming API that adds helpful
|
||||
events such as `content.done`, supports automatically parsing
|
||||
responses & tool calls and accumulates a `ChatCompletion` object
|
||||
from each individual chunk.
|
||||
|
||||
https://platform.openai.com/docs/api-reference/streaming
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
raw_stream: Stream[ChatCompletionChunk],
|
||||
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
|
||||
input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
|
||||
) -> None:
|
||||
self._raw_stream = raw_stream
|
||||
self._response = raw_stream.response
|
||||
self._iterator = self.__stream__()
|
||||
self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
|
||||
|
||||
def __next__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
|
||||
return self._iterator.__next__()
|
||||
|
||||
def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
for item in self._iterator:
|
||||
yield item
|
||||
|
||||
def __enter__(self) -> Self:
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
exc_tb: TracebackType | None,
|
||||
) -> None:
|
||||
self.close()
|
||||
|
||||
def close(self) -> None:
|
||||
"""
|
||||
Close the response and release the connection.
|
||||
|
||||
Automatically called if the response body is read to completion.
|
||||
"""
|
||||
self._response.close()
|
||||
|
||||
def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
|
||||
"""Waits until the stream has been read to completion and returns
|
||||
the accumulated `ParsedChatCompletion` object.
|
||||
|
||||
If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
|
||||
property will be the content deserialised into that class, if there was any content returned
|
||||
by the API.
|
||||
"""
|
||||
self.until_done()
|
||||
return self._state.get_final_completion()
|
||||
|
||||
def until_done(self) -> Self:
|
||||
"""Blocks until the stream has been consumed."""
|
||||
consume_sync_iterator(self)
|
||||
return self
|
||||
|
||||
@property
|
||||
def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
|
||||
return self._state.current_completion_snapshot
|
||||
|
||||
def __stream__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
for sse_event in self._raw_stream:
|
||||
events_to_fire = self._state.handle_chunk(sse_event)
|
||||
for event in events_to_fire:
|
||||
yield event
|
||||
|
||||
|
||||
class ChatCompletionStreamManager(Generic[ResponseFormatT]):
|
||||
"""Context manager over a `ChatCompletionStream` that is returned by `.stream()`.
|
||||
|
||||
This context manager ensures the response cannot be leaked if you don't read
|
||||
the stream to completion.
|
||||
|
||||
Usage:
|
||||
```py
|
||||
with client.beta.chat.completions.stream(...) as stream:
|
||||
for event in stream:
|
||||
...
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
api_request: Callable[[], Stream[ChatCompletionChunk]],
|
||||
*,
|
||||
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
|
||||
input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
|
||||
) -> None:
|
||||
self.__stream: ChatCompletionStream[ResponseFormatT] | None = None
|
||||
self.__api_request = api_request
|
||||
self.__response_format = response_format
|
||||
self.__input_tools = input_tools
|
||||
|
||||
def __enter__(self) -> ChatCompletionStream[ResponseFormatT]:
|
||||
raw_stream = self.__api_request()
|
||||
|
||||
self.__stream = ChatCompletionStream(
|
||||
raw_stream=raw_stream,
|
||||
response_format=self.__response_format,
|
||||
input_tools=self.__input_tools,
|
||||
)
|
||||
|
||||
return self.__stream
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
exc_tb: TracebackType | None,
|
||||
) -> None:
|
||||
if self.__stream is not None:
|
||||
self.__stream.close()
|
||||
|
||||
|
||||
class AsyncChatCompletionStream(Generic[ResponseFormatT]):
|
||||
"""Wrapper over the Chat Completions streaming API that adds helpful
|
||||
events such as `content.done`, supports automatically parsing
|
||||
responses & tool calls and accumulates a `ChatCompletion` object
|
||||
from each individual chunk.
|
||||
|
||||
https://platform.openai.com/docs/api-reference/streaming
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
raw_stream: AsyncStream[ChatCompletionChunk],
|
||||
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
|
||||
input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
|
||||
) -> None:
|
||||
self._raw_stream = raw_stream
|
||||
self._response = raw_stream.response
|
||||
self._iterator = self.__stream__()
|
||||
self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
|
||||
|
||||
async def __anext__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
|
||||
return await self._iterator.__anext__()
|
||||
|
||||
async def __aiter__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
async for item in self._iterator:
|
||||
yield item
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
return self
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
exc_tb: TracebackType | None,
|
||||
) -> None:
|
||||
await self.close()
|
||||
|
||||
async def close(self) -> None:
|
||||
"""
|
||||
Close the response and release the connection.
|
||||
|
||||
Automatically called if the response body is read to completion.
|
||||
"""
|
||||
await self._response.aclose()
|
||||
|
||||
async def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
|
||||
"""Waits until the stream has been read to completion and returns
|
||||
the accumulated `ParsedChatCompletion` object.
|
||||
|
||||
If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
|
||||
property will be the content deserialised into that class, if there was any content returned
|
||||
by the API.
|
||||
"""
|
||||
await self.until_done()
|
||||
return self._state.get_final_completion()
|
||||
|
||||
async def until_done(self) -> Self:
|
||||
"""Blocks until the stream has been consumed."""
|
||||
await consume_async_iterator(self)
|
||||
return self
|
||||
|
||||
@property
|
||||
def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
|
||||
return self._state.current_completion_snapshot
|
||||
|
||||
async def __stream__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
async for sse_event in self._raw_stream:
|
||||
events_to_fire = self._state.handle_chunk(sse_event)
|
||||
for event in events_to_fire:
|
||||
yield event
|
||||
|
||||
|
||||
class AsyncChatCompletionStreamManager(Generic[ResponseFormatT]):
|
||||
"""Context manager over a `AsyncChatCompletionStream` that is returned by `.stream()`.
|
||||
|
||||
This context manager ensures the response cannot be leaked if you don't read
|
||||
the stream to completion.
|
||||
|
||||
Usage:
|
||||
```py
|
||||
async with client.beta.chat.completions.stream(...) as stream:
|
||||
for event in stream:
|
||||
...
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
api_request: Awaitable[AsyncStream[ChatCompletionChunk]],
|
||||
*,
|
||||
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
|
||||
input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
|
||||
) -> None:
|
||||
self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None
|
||||
self.__api_request = api_request
|
||||
self.__response_format = response_format
|
||||
self.__input_tools = input_tools
|
||||
|
||||
async def __aenter__(self) -> AsyncChatCompletionStream[ResponseFormatT]:
|
||||
raw_stream = await self.__api_request
|
||||
|
||||
self.__stream = AsyncChatCompletionStream(
|
||||
raw_stream=raw_stream,
|
||||
response_format=self.__response_format,
|
||||
input_tools=self.__input_tools,
|
||||
)
|
||||
|
||||
return self.__stream
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
exc_tb: TracebackType | None,
|
||||
) -> None:
|
||||
if self.__stream is not None:
|
||||
await self.__stream.close()
|
||||
|
||||
|
||||
class ChatCompletionStreamState(Generic[ResponseFormatT]):
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
|
||||
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
|
||||
) -> None:
|
||||
self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None
|
||||
self.__choice_event_states: list[ChoiceEventState] = []
|
||||
|
||||
self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else []
|
||||
self._response_format = response_format
|
||||
self._rich_response_format: type | NotGiven = response_format if inspect.isclass(response_format) else NOT_GIVEN
|
||||
|
||||
def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
|
||||
return parse_chat_completion(
|
||||
chat_completion=self.current_completion_snapshot,
|
||||
response_format=self._rich_response_format,
|
||||
input_tools=self._input_tools,
|
||||
)
|
||||
|
||||
@property
|
||||
def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
|
||||
assert self.__current_completion_snapshot is not None
|
||||
return self.__current_completion_snapshot
|
||||
|
||||
def handle_chunk(self, chunk: ChatCompletionChunk) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
"""Accumulate a new chunk into the snapshot and returns a list of events to yield."""
|
||||
self.__current_completion_snapshot = self._accumulate_chunk(chunk)
|
||||
|
||||
return self._build_events(
|
||||
chunk=chunk,
|
||||
completion_snapshot=self.__current_completion_snapshot,
|
||||
)
|
||||
|
||||
def _get_choice_state(self, choice: ChoiceChunk) -> ChoiceEventState:
|
||||
try:
|
||||
return self.__choice_event_states[choice.index]
|
||||
except IndexError:
|
||||
choice_state = ChoiceEventState(input_tools=self._input_tools)
|
||||
self.__choice_event_states.append(choice_state)
|
||||
return choice_state
|
||||
|
||||
def _accumulate_chunk(self, chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
|
||||
completion_snapshot = self.__current_completion_snapshot
|
||||
|
||||
if completion_snapshot is None:
|
||||
return _convert_initial_chunk_into_snapshot(chunk)
|
||||
|
||||
for choice in chunk.choices:
|
||||
try:
|
||||
choice_snapshot = completion_snapshot.choices[choice.index]
|
||||
previous_tool_calls = choice_snapshot.message.tool_calls or []
|
||||
|
||||
choice_snapshot.message = cast(
|
||||
ParsedChatCompletionMessageSnapshot,
|
||||
construct_type(
|
||||
type_=ParsedChatCompletionMessageSnapshot,
|
||||
value=accumulate_delta(
|
||||
cast(
|
||||
"dict[object, object]",
|
||||
model_dump(
|
||||
choice_snapshot.message,
|
||||
# we don't want to serialise / deserialise our custom properties
|
||||
# as they won't appear in the delta and we don't want to have to
|
||||
# continuosly reparse the content
|
||||
exclude={
|
||||
"parsed": True,
|
||||
"tool_calls": {
|
||||
idx: {"function": {"parsed_arguments": True}}
|
||||
for idx, _ in enumerate(choice_snapshot.message.tool_calls or [])
|
||||
},
|
||||
},
|
||||
),
|
||||
),
|
||||
cast("dict[object, object]", choice.delta.to_dict()),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
# ensure tools that have already been parsed are added back into the newly
|
||||
# constructed message snapshot
|
||||
for tool_index, prev_tool in enumerate(previous_tool_calls):
|
||||
new_tool = (choice_snapshot.message.tool_calls or [])[tool_index]
|
||||
|
||||
if prev_tool.type == "function":
|
||||
assert new_tool.type == "function"
|
||||
new_tool.function.parsed_arguments = prev_tool.function.parsed_arguments
|
||||
elif TYPE_CHECKING: # type: ignore[unreachable]
|
||||
assert_never(prev_tool)
|
||||
except IndexError:
|
||||
choice_snapshot = cast(
|
||||
ParsedChoiceSnapshot,
|
||||
construct_type(
|
||||
type_=ParsedChoiceSnapshot,
|
||||
value={
|
||||
**choice.model_dump(exclude_unset=True, exclude={"delta"}),
|
||||
"message": choice.delta.to_dict(),
|
||||
},
|
||||
),
|
||||
)
|
||||
completion_snapshot.choices.append(choice_snapshot)
|
||||
|
||||
if choice.finish_reason:
|
||||
choice_snapshot.finish_reason = choice.finish_reason
|
||||
|
||||
if has_parseable_input(response_format=self._response_format, input_tools=self._input_tools):
|
||||
if choice.finish_reason == "length":
|
||||
raise LengthFinishReasonError()
|
||||
|
||||
if choice.finish_reason == "content_filter":
|
||||
raise ContentFilterFinishReasonError()
|
||||
|
||||
if (
|
||||
choice_snapshot.message.content
|
||||
and not choice_snapshot.message.refusal
|
||||
and is_given(self._rich_response_format)
|
||||
):
|
||||
choice_snapshot.message.parsed = from_json(
|
||||
bytes(choice_snapshot.message.content, "utf-8"),
|
||||
partial_mode=True,
|
||||
)
|
||||
|
||||
for tool_call_chunk in choice.delta.tool_calls or []:
|
||||
tool_call_snapshot = (choice_snapshot.message.tool_calls or [])[tool_call_chunk.index]
|
||||
|
||||
if tool_call_snapshot.type == "function":
|
||||
input_tool = get_input_tool_by_name(
|
||||
input_tools=self._input_tools, name=tool_call_snapshot.function.name
|
||||
)
|
||||
|
||||
if (
|
||||
input_tool
|
||||
and input_tool.get("function", {}).get("strict")
|
||||
and tool_call_snapshot.function.arguments
|
||||
):
|
||||
tool_call_snapshot.function.parsed_arguments = from_json(
|
||||
bytes(tool_call_snapshot.function.arguments, "utf-8"),
|
||||
partial_mode=True,
|
||||
)
|
||||
elif TYPE_CHECKING: # type: ignore[unreachable]
|
||||
assert_never(tool_call_snapshot)
|
||||
|
||||
if choice.logprobs is not None:
|
||||
if choice_snapshot.logprobs is None:
|
||||
choice_snapshot.logprobs = build(
|
||||
ChoiceLogprobs,
|
||||
content=choice.logprobs.content,
|
||||
refusal=choice.logprobs.refusal,
|
||||
)
|
||||
else:
|
||||
if choice.logprobs.content:
|
||||
if choice_snapshot.logprobs.content is None:
|
||||
choice_snapshot.logprobs.content = []
|
||||
|
||||
choice_snapshot.logprobs.content.extend(choice.logprobs.content)
|
||||
|
||||
if choice.logprobs.refusal:
|
||||
if choice_snapshot.logprobs.refusal is None:
|
||||
choice_snapshot.logprobs.refusal = []
|
||||
|
||||
choice_snapshot.logprobs.refusal.extend(choice.logprobs.refusal)
|
||||
|
||||
completion_snapshot.usage = chunk.usage
|
||||
completion_snapshot.system_fingerprint = chunk.system_fingerprint
|
||||
|
||||
return completion_snapshot
|
||||
|
||||
def _build_events(
|
||||
self,
|
||||
*,
|
||||
chunk: ChatCompletionChunk,
|
||||
completion_snapshot: ParsedChatCompletionSnapshot,
|
||||
) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
|
||||
|
||||
events_to_fire.append(
|
||||
build(ChunkEvent, type="chunk", chunk=chunk, snapshot=completion_snapshot),
|
||||
)
|
||||
|
||||
for choice in chunk.choices:
|
||||
choice_state = self._get_choice_state(choice)
|
||||
choice_snapshot = completion_snapshot.choices[choice.index]
|
||||
|
||||
if choice.delta.content is not None and choice_snapshot.message.content is not None:
|
||||
events_to_fire.append(
|
||||
build(
|
||||
ContentDeltaEvent,
|
||||
type="content.delta",
|
||||
delta=choice.delta.content,
|
||||
snapshot=choice_snapshot.message.content,
|
||||
parsed=choice_snapshot.message.parsed,
|
||||
)
|
||||
)
|
||||
|
||||
if choice.delta.refusal is not None and choice_snapshot.message.refusal is not None:
|
||||
events_to_fire.append(
|
||||
build(
|
||||
RefusalDeltaEvent,
|
||||
type="refusal.delta",
|
||||
delta=choice.delta.refusal,
|
||||
snapshot=choice_snapshot.message.refusal,
|
||||
)
|
||||
)
|
||||
|
||||
if choice.delta.tool_calls:
|
||||
tool_calls = choice_snapshot.message.tool_calls
|
||||
assert tool_calls is not None
|
||||
|
||||
for tool_call_delta in choice.delta.tool_calls:
|
||||
tool_call = tool_calls[tool_call_delta.index]
|
||||
|
||||
if tool_call.type == "function":
|
||||
assert tool_call_delta.function is not None
|
||||
events_to_fire.append(
|
||||
build(
|
||||
FunctionToolCallArgumentsDeltaEvent,
|
||||
type="tool_calls.function.arguments.delta",
|
||||
name=tool_call.function.name,
|
||||
index=tool_call_delta.index,
|
||||
arguments=tool_call.function.arguments,
|
||||
parsed_arguments=tool_call.function.parsed_arguments,
|
||||
arguments_delta=tool_call_delta.function.arguments or "",
|
||||
)
|
||||
)
|
||||
elif TYPE_CHECKING: # type: ignore[unreachable]
|
||||
assert_never(tool_call)
|
||||
|
||||
if choice.logprobs is not None and choice_snapshot.logprobs is not None:
|
||||
if choice.logprobs.content and choice_snapshot.logprobs.content:
|
||||
events_to_fire.append(
|
||||
build(
|
||||
LogprobsContentDeltaEvent,
|
||||
type="logprobs.content.delta",
|
||||
content=choice.logprobs.content,
|
||||
snapshot=choice_snapshot.logprobs.content,
|
||||
),
|
||||
)
|
||||
|
||||
if choice.logprobs.refusal and choice_snapshot.logprobs.refusal:
|
||||
events_to_fire.append(
|
||||
build(
|
||||
LogprobsRefusalDeltaEvent,
|
||||
type="logprobs.refusal.delta",
|
||||
refusal=choice.logprobs.refusal,
|
||||
snapshot=choice_snapshot.logprobs.refusal,
|
||||
),
|
||||
)
|
||||
|
||||
events_to_fire.extend(
|
||||
choice_state.get_done_events(
|
||||
choice_chunk=choice,
|
||||
choice_snapshot=choice_snapshot,
|
||||
response_format=self._response_format,
|
||||
)
|
||||
)
|
||||
|
||||
return events_to_fire
|
||||
|
||||
|
||||
class ChoiceEventState:
|
||||
def __init__(self, *, input_tools: list[ChatCompletionToolParam]) -> None:
|
||||
self._input_tools = input_tools
|
||||
|
||||
self._content_done = False
|
||||
self._refusal_done = False
|
||||
self._logprobs_content_done = False
|
||||
self._logprobs_refusal_done = False
|
||||
self._done_tool_calls: set[int] = set()
|
||||
self.__current_tool_call_index: int | None = None
|
||||
|
||||
def get_done_events(
|
||||
self,
|
||||
*,
|
||||
choice_chunk: ChoiceChunk,
|
||||
choice_snapshot: ParsedChoiceSnapshot,
|
||||
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
|
||||
) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
|
||||
|
||||
if choice_snapshot.finish_reason:
|
||||
events_to_fire.extend(
|
||||
self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
|
||||
)
|
||||
|
||||
if (
|
||||
self.__current_tool_call_index is not None
|
||||
and self.__current_tool_call_index not in self._done_tool_calls
|
||||
):
|
||||
self._add_tool_done_event(
|
||||
events_to_fire=events_to_fire,
|
||||
choice_snapshot=choice_snapshot,
|
||||
tool_index=self.__current_tool_call_index,
|
||||
)
|
||||
|
||||
for tool_call in choice_chunk.delta.tool_calls or []:
|
||||
if self.__current_tool_call_index != tool_call.index:
|
||||
events_to_fire.extend(
|
||||
self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
|
||||
)
|
||||
|
||||
if self.__current_tool_call_index is not None:
|
||||
self._add_tool_done_event(
|
||||
events_to_fire=events_to_fire,
|
||||
choice_snapshot=choice_snapshot,
|
||||
tool_index=self.__current_tool_call_index,
|
||||
)
|
||||
|
||||
self.__current_tool_call_index = tool_call.index
|
||||
|
||||
return events_to_fire
|
||||
|
||||
def _content_done_events(
|
||||
self,
|
||||
*,
|
||||
choice_snapshot: ParsedChoiceSnapshot,
|
||||
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
|
||||
) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
|
||||
events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
|
||||
|
||||
if choice_snapshot.message.content and not self._content_done:
|
||||
self._content_done = True
|
||||
|
||||
parsed = maybe_parse_content(
|
||||
response_format=response_format,
|
||||
message=choice_snapshot.message,
|
                    )

                    # update the parsed content to now use the richer `response_format`
                    # as opposed to the raw JSON-parsed object as the content is now
                    # complete and can be fully validated.
                    choice_snapshot.message.parsed = parsed

                events_to_fire.append(
                    build(
                        # we do this dance so that when the `ContentDoneEvent` instance
                        # is printed at runtime the class name will include the solved
                        # type variable, e.g. `ContentDoneEvent[MyModelType]`
                        cast(  # pyright: ignore[reportUnnecessaryCast]
                            "type[ContentDoneEvent[ResponseFormatT]]",
                            cast(Any, ContentDoneEvent)[solve_response_format_t(response_format)],
                        ),
                        type="content.done",
                        content=choice_snapshot.message.content,
                        parsed=parsed,
                    ),
                )

        if choice_snapshot.message.refusal is not None and not self._refusal_done:
            self._refusal_done = True
            events_to_fire.append(
                build(RefusalDoneEvent, type="refusal.done", refusal=choice_snapshot.message.refusal),
            )

        if (
            choice_snapshot.logprobs is not None
            and choice_snapshot.logprobs.content is not None
            and not self._logprobs_content_done
        ):
            self._logprobs_content_done = True
            events_to_fire.append(
                build(LogprobsContentDoneEvent, type="logprobs.content.done", content=choice_snapshot.logprobs.content),
            )

        if (
            choice_snapshot.logprobs is not None
            and choice_snapshot.logprobs.refusal is not None
            and not self._logprobs_refusal_done
        ):
            self._logprobs_refusal_done = True
            events_to_fire.append(
                build(LogprobsRefusalDoneEvent, type="logprobs.refusal.done", refusal=choice_snapshot.logprobs.refusal),
            )

        return events_to_fire

    def _add_tool_done_event(
        self,
        *,
        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]],
        choice_snapshot: ParsedChoiceSnapshot,
        tool_index: int,
    ) -> None:
        if tool_index in self._done_tool_calls:
            return

        self._done_tool_calls.add(tool_index)

        assert choice_snapshot.message.tool_calls is not None
        tool_call_snapshot = choice_snapshot.message.tool_calls[tool_index]

        if tool_call_snapshot.type == "function":
            parsed_arguments = parse_function_tool_arguments(
                input_tools=self._input_tools, function=tool_call_snapshot.function
            )

            # update the parsed content to potentially use a richer type
            # as opposed to the raw JSON-parsed object as the content is now
            # complete and can be fully validated.
            tool_call_snapshot.function.parsed_arguments = parsed_arguments

            events_to_fire.append(
                build(
                    FunctionToolCallArgumentsDoneEvent,
                    type="tool_calls.function.arguments.done",
                    index=tool_index,
                    name=tool_call_snapshot.function.name,
                    arguments=tool_call_snapshot.function.arguments,
                    parsed_arguments=parsed_arguments,
                )
            )
        elif TYPE_CHECKING:  # type: ignore[unreachable]
            assert_never(tool_call_snapshot)


def _convert_initial_chunk_into_snapshot(chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
    data = chunk.to_dict()
    choices = cast("list[object]", data["choices"])

    for choice in chunk.choices:
        choices[choice.index] = {
            **choice.model_dump(exclude_unset=True, exclude={"delta"}),
            "message": choice.delta.to_dict(),
        }

    return cast(
        ParsedChatCompletionSnapshot,
        construct_type(
            type_=ParsedChatCompletionSnapshot,
            value={
                "system_fingerprint": None,
                **data,
                "object": "chat.completion",
            },
        ),
    )
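Note: the `cast(Any, ContentDoneEvent)[...]` subscription above relies on runtime parametrization of pydantic generic models. A minimal sketch of the effect, outside this diff (`DoneEvent` is a made-up stand-in for `ContentDoneEvent`):

```py
# Hedged sketch: subscripting a generic pydantic model at runtime produces a
# subclass whose name includes the solved type variable, giving nicer reprs.
from typing import Generic, TypeVar

from pydantic import BaseModel

T = TypeVar("T")


class DoneEvent(BaseModel, Generic[T]):
    parsed: T


solved = DoneEvent[int]  # runtime subscription, as in the cast() above
print(solved.__name__)   # "DoneEvent[int]" - the type variable shows up
print(solved(parsed=1))  # repr is rendered with the parametrized class name
```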
@@ -1,123 +0,0 @@
from typing import List, Union, Generic, Optional
from typing_extensions import Literal

from ._types import ParsedChatCompletionSnapshot
from ...._models import BaseModel, GenericModel
from ..._parsing import ResponseFormatT
from ....types.chat import ChatCompletionChunk, ChatCompletionTokenLogprob


class ChunkEvent(BaseModel):
    type: Literal["chunk"]

    chunk: ChatCompletionChunk

    snapshot: ParsedChatCompletionSnapshot


class ContentDeltaEvent(BaseModel):
    """This event is yielded for every chunk with `choice.delta.content` data."""

    type: Literal["content.delta"]

    delta: str

    snapshot: str

    parsed: Optional[object] = None


class ContentDoneEvent(GenericModel, Generic[ResponseFormatT]):
    type: Literal["content.done"]

    content: str

    parsed: Optional[ResponseFormatT] = None


class RefusalDeltaEvent(BaseModel):
    type: Literal["refusal.delta"]

    delta: str

    snapshot: str


class RefusalDoneEvent(BaseModel):
    type: Literal["refusal.done"]

    refusal: str


class FunctionToolCallArgumentsDeltaEvent(BaseModel):
    type: Literal["tool_calls.function.arguments.delta"]

    name: str

    index: int

    arguments: str
    """Accumulated raw JSON string"""

    parsed_arguments: object
    """The parsed arguments so far"""

    arguments_delta: str
    """The JSON string delta"""


class FunctionToolCallArgumentsDoneEvent(BaseModel):
    type: Literal["tool_calls.function.arguments.done"]

    name: str

    index: int

    arguments: str
    """Accumulated raw JSON string"""

    parsed_arguments: object
    """The parsed arguments"""


class LogprobsContentDeltaEvent(BaseModel):
    type: Literal["logprobs.content.delta"]

    content: List[ChatCompletionTokenLogprob]

    snapshot: List[ChatCompletionTokenLogprob]


class LogprobsContentDoneEvent(BaseModel):
    type: Literal["logprobs.content.done"]

    content: List[ChatCompletionTokenLogprob]


class LogprobsRefusalDeltaEvent(BaseModel):
    type: Literal["logprobs.refusal.delta"]

    refusal: List[ChatCompletionTokenLogprob]

    snapshot: List[ChatCompletionTokenLogprob]


class LogprobsRefusalDoneEvent(BaseModel):
    type: Literal["logprobs.refusal.done"]

    refusal: List[ChatCompletionTokenLogprob]


ChatCompletionStreamEvent = Union[
    ChunkEvent,
    ContentDeltaEvent,
    ContentDoneEvent[ResponseFormatT],
    RefusalDeltaEvent,
    RefusalDoneEvent,
    FunctionToolCallArgumentsDeltaEvent,
    FunctionToolCallArgumentsDoneEvent,
    LogprobsContentDeltaEvent,
    LogprobsContentDoneEvent,
    LogprobsRefusalDeltaEvent,
    LogprobsRefusalDoneEvent,
]
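For reference, a minimal consumer of the event union defined above — a sketch assuming the streaming helpers are wired up as in this package, dispatching on the `type` discriminator each event class carries:

```py
from openai import OpenAI

client = OpenAI()
with client.beta.chat.completions.stream(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Say hi"}],
) as stream:
    for event in stream:
        # every event class above has a Literal `type` tag to switch on
        if event.type == "content.delta":
            print(event.delta, end="", flush=True)
        elif event.type == "content.done":
            print("\nfinal content:", event.content)
        elif event.type == "refusal.done":
            print("refused:", event.refusal)
```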
@@ -1,20 +0,0 @@
from __future__ import annotations

from typing_extensions import TypeAlias

from ....types.chat import ParsedChoice, ParsedChatCompletion, ParsedChatCompletionMessage

ParsedChatCompletionSnapshot: TypeAlias = ParsedChatCompletion[object]
"""Snapshot type representing an in-progress accumulation of
a `ParsedChatCompletion` object.
"""

ParsedChatCompletionMessageSnapshot: TypeAlias = ParsedChatCompletionMessage[object]
"""Snapshot type representing an in-progress accumulation of
a `ParsedChatCompletionMessage` object.

If the content has been fully accumulated, the `.parsed` content will be
the `response_format` instance, otherwise it'll be the raw JSON parsed version.
"""

ParsedChoiceSnapshot: TypeAlias = ParsedChoice[object]
@@ -56,14 +56,6 @@ from .batches import (
    BatchesWithStreamingResponse,
    AsyncBatchesWithStreamingResponse,
)
from .uploads import (
    Uploads,
    AsyncUploads,
    UploadsWithRawResponse,
    AsyncUploadsWithRawResponse,
    UploadsWithStreamingResponse,
    AsyncUploadsWithStreamingResponse,
)
from .embeddings import (
    Embeddings,
    AsyncEmbeddings,
@@ -164,10 +156,4 @@ __all__ = [
    "AsyncBatchesWithRawResponse",
    "BatchesWithStreamingResponse",
    "AsyncBatchesWithStreamingResponse",
    "Uploads",
    "AsyncUploads",
    "UploadsWithRawResponse",
    "AsyncUploadsWithRawResponse",
    "UploadsWithStreamingResponse",
    "AsyncUploadsWithStreamingResponse",
]
@@ -22,8 +22,9 @@ from ..._response import (
    async_to_custom_streamed_response_wrapper,
)
from ...types.audio import speech_create_params
from ..._base_client import make_request_options
from ...types.audio.speech_model import SpeechModel
from ..._base_client import (
    make_request_options,
)

__all__ = ["Speech", "AsyncSpeech"]

@@ -41,7 +42,7 @@ class Speech(SyncAPIResource):
        self,
        *,
        input: str,
        model: Union[str, SpeechModel],
        model: Union[str, Literal["tts-1", "tts-1-hd"]],
        voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
        speed: float | NotGiven = NOT_GIVEN,
@@ -114,7 +115,7 @@ class AsyncSpeech(AsyncAPIResource):
        self,
        *,
        input: str,
        model: Union[str, SpeechModel],
        model: Union[str, Literal["tts-1", "tts-1-hd"]],
        voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
        speed: float | NotGiven = NOT_GIVEN,
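The hunks above narrow `model` back to the explicit `"tts-1"` / `"tts-1-hd"` literals (dropping the `SpeechModel` alias). A hedged usage sketch against that signature:

```py
from openai import OpenAI

client = OpenAI()
response = client.audio.speech.create(
    model="tts-1",  # must be "tts-1", "tts-1-hd", or a raw string
    voice="alloy",
    input="The quick brown fox jumped over the lazy dog.",
)
# the endpoint returns binary audio; mp3 is the default response_format
with open("speech.mp3", "wb") as f:
    f.write(response.content)
```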
@@ -19,8 +19,9 @@ from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...types.audio import transcription_create_params
from ..._base_client import make_request_options
from ...types.audio_model import AudioModel
from ..._base_client import (
    make_request_options,
)
from ...types.audio.transcription import Transcription

__all__ = ["Transcriptions", "AsyncTranscriptions"]
@@ -39,7 +40,7 @@ class Transcriptions(SyncAPIResource):
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        model: Union[str, Literal["whisper-1"]],
        language: str | NotGiven = NOT_GIVEN,
        prompt: str | NotGiven = NOT_GIVEN,
        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
@@ -135,7 +136,7 @@ class AsyncTranscriptions(AsyncAPIResource):
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        model: Union[str, Literal["whisper-1"]],
        language: str | NotGiven = NOT_GIVEN,
        prompt: str | NotGiven = NOT_GIVEN,
        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
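Same pattern for transcriptions: the `AudioModel` alias is replaced by the single `"whisper-1"` literal. A minimal sketch of a call that type-checks against the narrowed signature:

```py
from openai import OpenAI

client = OpenAI()
with open("speech.mp3", "rb") as audio_file:
    transcription = client.audio.transcriptions.create(
        model="whisper-1",  # the only literal accepted after this change
        file=audio_file,
    )
print(transcription.text)
```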
@@ -3,6 +3,7 @@
from __future__ import annotations

from typing import Union, Mapping, cast
from typing_extensions import Literal

import httpx

@@ -18,8 +19,9 @@ from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...types.audio import translation_create_params
from ..._base_client import make_request_options
from ...types.audio_model import AudioModel
from ..._base_client import (
    make_request_options,
)
from ...types.audio.translation import Translation

__all__ = ["Translations", "AsyncTranslations"]
@@ -38,7 +40,7 @@ class Translations(SyncAPIResource):
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        model: Union[str, Literal["whisper-1"]],
        prompt: str | NotGiven = NOT_GIVEN,
        response_format: str | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
@@ -119,7 +121,7 @@ class AsyncTranslations(AsyncAPIResource):
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        model: Union[str, Literal["whisper-1"]],
        prompt: str | NotGiven = NOT_GIVEN,
        response_format: str | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
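And the equivalent sketch for translations, which return English text for non-English audio:

```py
from openai import OpenAI

client = OpenAI()
with open("speech_fr.mp3", "rb") as audio_file:
    translation = client.audio.translations.create(
        model="whisper-1",
        file=audio_file,  # non-English audio is translated to English text
    )
print(translation.text)
```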
@@ -22,8 +22,10 @@ from ...types.beta import (
    assistant_create_params,
    assistant_update_params,
)
from ..._base_client import AsyncPaginator, make_request_options
from ...types.chat_model import ChatModel
from ..._base_client import (
    AsyncPaginator,
    make_request_options,
)
from ...types.beta.assistant import Assistant
from ...types.beta.assistant_deleted import AssistantDeleted
from ...types.beta.assistant_tool_param import AssistantToolParam
@@ -44,7 +46,31 @@ class Assistants(SyncAPIResource):
    def create(
        self,
        *,
        model: Union[str, ChatModel],
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
        ],
        description: Optional[str] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
@@ -88,11 +114,6 @@ class Assistants(SyncAPIResource):
          [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
          and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

          Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
          Outputs which guarantees the model will match your supplied JSON schema. Learn
          more in the
          [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

          Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
          message the model generates is valid JSON.

@@ -238,11 +259,6 @@ class Assistants(SyncAPIResource):
          [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
          and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

          Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
          Outputs which guarantees the model will match your supplied JSON schema. Learn
          more in the
          [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

          Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
          message the model generates is valid JSON.

@@ -419,7 +435,31 @@ class AsyncAssistants(AsyncAPIResource):
    async def create(
        self,
        *,
        model: Union[str, ChatModel],
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
        ],
        description: Optional[str] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
@@ -463,11 +503,6 @@ class AsyncAssistants(AsyncAPIResource):
          [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
          and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

          Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
          Outputs which guarantees the model will match your supplied JSON schema. Learn
          more in the
          [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

          Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
          message the model generates is valid JSON.

@@ -613,11 +648,6 @@ class AsyncAssistants(AsyncAPIResource):
          [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
          and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

          Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
          Outputs which guarantees the model will match your supplied JSON schema. Learn
          more in the
          [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

          Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
          message the model generates is valid JSON.

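With the pinned literal union above, an assistant is created by naming one of the listed models (or passing a raw string). A hedged sketch:

```py
from openai import OpenAI

client = OpenAI()
assistant = client.beta.assistants.create(
    model="gpt-4o",  # any entry from the literal list above, or a raw string
    name="Math Tutor",
    instructions="Answer math questions step by step.",
)
print(assistant.id)
```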
@@ -11,7 +11,6 @@ from .threads import (
    AsyncThreadsWithStreamingResponse,
)
from ..._compat import cached_property
from .chat.chat import Chat, AsyncChat
from .assistants import (
    Assistants,
    AsyncAssistants,
@@ -36,10 +35,6 @@ __all__ = ["Beta", "AsyncBeta"]


class Beta(SyncAPIResource):
    @cached_property
    def chat(self) -> Chat:
        return Chat(self._client)

    @cached_property
    def vector_stores(self) -> VectorStores:
        return VectorStores(self._client)
@@ -62,10 +57,6 @@ class Beta(SyncAPIResource):


class AsyncBeta(AsyncAPIResource):
    @cached_property
    def chat(self) -> AsyncChat:
        return AsyncChat(self._client)

    @cached_property
    def vector_stores(self) -> AsyncVectorStores:
        return AsyncVectorStores(self._client)
@@ -1,11 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .chat import Chat, AsyncChat
from .completions import Completions, AsyncCompletions

__all__ = [
    "Completions",
    "AsyncCompletions",
    "Chat",
    "AsyncChat",
]
@@ -1,21 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from ...._compat import cached_property
from .completions import Completions, AsyncCompletions
from ...._resource import SyncAPIResource, AsyncAPIResource

__all__ = ["Chat", "AsyncChat"]


class Chat(SyncAPIResource):
    @cached_property
    def completions(self) -> Completions:
        return Completions(self._client)


class AsyncChat(AsyncAPIResource):
    @cached_property
    def completions(self) -> AsyncCompletions:
        return AsyncCompletions(self._client)
@@ -1,455 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Dict, List, Union, Iterable, Optional
from functools import partial
from typing_extensions import Literal

import httpx

from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._streaming import Stream
from ....types.chat import completion_create_params
from ....lib._parsing import (
    ResponseFormatT,
    validate_input_tools as _validate_input_tools,
    parse_chat_completion as _parse_chat_completion,
    type_to_response_format_param as _type_to_response_format,
)
from ....types.chat_model import ChatModel
from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import OpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = OpenAI()
        completion = client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        raw_completion = self._client.chat.completions.create(
            messages=messages,
            model=model,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_tokens=max_tokens,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            presence_penalty=presence_penalty,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return _parse_chat_completion(
            response_format=response_format,
            chat_completion=raw_completion,
            input_tools=tools,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed, however the `stream` instance is still available outside
        the context manager.
        """
        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request: partial[Stream[ChatCompletionChunk]] = partial(
            self._client.chat.completions.create,
            messages=messages,
            model=model,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_tokens=max_tokens,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            presence_penalty=presence_penalty,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return ChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


class AsyncCompletions(AsyncAPIResource):
    async def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import AsyncOpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = AsyncOpenAI()
        completion = await client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        raw_completion = await self._client.chat.completions.create(
            messages=messages,
            model=model,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_tokens=max_tokens,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            presence_penalty=presence_penalty,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return _parse_chat_completion(
            response_format=response_format,
            chat_completion=raw_completion,
            input_tools=tools,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        async with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            async for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed, however the `stream` instance is still available outside
        the context manager.
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request = self._client.chat.completions.create(
            messages=messages,
            model=model,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_tokens=max_tokens,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            presence_penalty=presence_penalty,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return AsyncChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )
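The docstrings above mention the `openai.pydantic_function_tool()` path but only show the `response_format` one. A hedged sketch of the tool-call variant (the model class and prompt are illustrative):

```py
import openai
from pydantic import BaseModel


class GetWeather(BaseModel):
    city: str


client = openai.OpenAI()
completion = client.beta.chat.completions.parse(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "What is the weather in Paris?"}],
    tools=[openai.pydantic_function_tool(GetWeather)],
)
# assumes the model chose to call the tool; check for None in real code
tool_call = completion.choices[0].message.tool_calls[0]
print(tool_call.function.parsed_arguments)  # e.g. GetWeather(city='Paris')
```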
@@ -30,7 +30,10 @@ from ....._resource import SyncAPIResource, AsyncAPIResource
from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....._streaming import Stream, AsyncStream
from .....pagination import SyncCursorPage, AsyncCursorPage
from ....._base_client import AsyncPaginator, make_request_options
from ....._base_client import (
    AsyncPaginator,
    make_request_options,
)
from .....lib.streaming import (
    AssistantEventHandler,
    AssistantEventHandlerT,
@@ -39,7 +42,6 @@ from .....lib.streaming import (
    AsyncAssistantEventHandlerT,
    AsyncAssistantStreamManager,
)
from .....types.chat_model import ChatModel
from .....types.beta.threads import (
    run_list_params,
    run_create_params,
@@ -80,7 +82,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
@@ -145,11 +173,6 @@ class Runs(SyncAPIResource):
          [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
          and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

          Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
          Outputs which guarantees the model will match your supplied JSON schema. Learn
          more in the
          [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

          Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
          message the model generates is valid JSON.

@@ -212,7 +235,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -280,11 +329,6 @@ class Runs(SyncAPIResource):
          [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
          and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

          Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
          Outputs which guarantees the model will match your supplied JSON schema. Learn
          more in the
          [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

          Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
          message the model generates is valid JSON.

@@ -343,7 +387,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -411,11 +481,6 @@ class Runs(SyncAPIResource):
          [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
          and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

          Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
          Outputs which guarantees the model will match your supplied JSON schema. Learn
          more in the
          [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

          Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
          message the model generates is valid JSON.

@@ -473,7 +538,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
@@ -718,8 +809,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -753,7 +869,6 @@ class Runs(SyncAPIResource):
            response_format=response_format,
            temperature=temperature,
            tool_choice=tool_choice,
            parallel_tool_calls=parallel_tool_calls,
            # We assume we are not streaming when polling
            stream=False,
            tools=tools,
@@ -786,8 +901,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -817,8 +957,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -848,8 +1013,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -894,7 +1084,6 @@ class Runs(SyncAPIResource):
                    "stream": True,
                    "tools": tools,
                    "truncation_strategy": truncation_strategy,
                    "parallel_tool_calls": parallel_tool_calls,
                    "top_p": top_p,
                },
                run_create_params.RunCreateParams,
@@ -964,8 +1153,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -994,8 +1208,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1024,8 +1263,33 @@ class Runs(SyncAPIResource):
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        model: Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
            None,
        ]
        | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1069,7 +1333,6 @@ class Runs(SyncAPIResource):
                    "tool_choice": tool_choice,
                    "stream": True,
                    "tools": tools,
                    "parallel_tool_calls": parallel_tool_calls,
                    "truncation_strategy": truncation_strategy,
                    "top_p": top_p,
                },
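A hedged sketch of what the per-run `model` override in the hunks above looks like from the caller's side (the assistant id is a placeholder):

```py
from openai import OpenAI

client = OpenAI()
thread = client.beta.threads.create()
run = client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id="asst_123",  # placeholder; use a real assistant id
    model="gpt-4-turbo",  # per-run override from the literal list above
)
print(run.status)
```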
@ -1393,7 +1656,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
|
||||
@ -1458,11 +1747,6 @@ class AsyncRuns(AsyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1525,7 +1809,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1593,11 +1903,6 @@ class AsyncRuns(AsyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1656,7 +1961,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1724,11 +2055,6 @@ class AsyncRuns(AsyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1786,7 +2112,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
|
||||
@ -2031,8 +2383,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
|
||||
@ -2066,7 +2443,6 @@ class AsyncRuns(AsyncAPIResource):
|
||||
response_format=response_format,
|
||||
temperature=temperature,
|
||||
tool_choice=tool_choice,
|
||||
parallel_tool_calls=parallel_tool_calls,
|
||||
# We assume we are not streaming when polling
|
||||
stream=False,
|
||||
tools=tools,
|
||||
@ -2099,8 +2475,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
|
||||
@ -2130,8 +2531,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
|
||||
@ -2161,8 +2587,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
|
||||
@ -2210,7 +2661,6 @@ class AsyncRuns(AsyncAPIResource):
|
||||
"tools": tools,
|
||||
"truncation_strategy": truncation_strategy,
|
||||
"top_p": top_p,
|
||||
"parallel_tool_calls": parallel_tool_calls,
|
||||
},
|
||||
run_create_params.RunCreateParams,
|
||||
),
|
||||
@ -2279,8 +2729,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
|
||||
@ -2309,8 +2784,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
|
||||
@ -2339,8 +2839,33 @@ class AsyncRuns(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
|
||||
@ -2386,7 +2911,6 @@ class AsyncRuns(AsyncAPIResource):
|
||||
"tool_choice": tool_choice,
|
||||
"stream": True,
|
||||
"tools": tools,
|
||||
"parallel_tool_calls": parallel_tool_calls,
|
||||
"truncation_strategy": truncation_strategy,
|
||||
"top_p": top_p,
|
||||
},
|
||||
|
@ -41,7 +41,9 @@ from ....types.beta import (
|
||||
thread_update_params,
|
||||
thread_create_and_run_params,
|
||||
)
|
||||
from ...._base_client import make_request_options
|
||||
from ...._base_client import (
|
||||
make_request_options,
|
||||
)
|
||||
from ....lib.streaming import (
|
||||
AssistantEventHandler,
|
||||
AssistantEventHandlerT,
|
||||
@ -50,7 +52,6 @@ from ....lib.streaming import (
|
||||
AsyncAssistantEventHandlerT,
|
||||
AsyncAssistantStreamManager,
|
||||
)
|
||||
from ....types.chat_model import ChatModel
|
||||
from ....types.beta.thread import Thread
|
||||
from ....types.beta.threads.run import Run
|
||||
from ....types.beta.thread_deleted import ThreadDeleted
|
||||
@ -263,7 +264,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
|
||||
@ -323,11 +350,6 @@ class Threads(SyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
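The same signature change repeats on Threads.create_and_run, which builds the thread and starts the run in one request. A minimal sketch under the same assumptions (hypothetical assistant ID):

from openai import OpenAI

client = OpenAI()
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",  # hypothetical ID
    thread={"messages": [{"role": "user", "content": "Summarize our last meeting."}]},
    model="gpt-4o",
)
print(run.thread_id, run.status)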
@ -394,7 +416,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -457,11 +505,6 @@ class Threads(SyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -524,7 +567,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -587,11 +656,6 @@ class Threads(SyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -653,7 +717,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
|
||||
@ -711,7 +801,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -767,7 +883,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -796,7 +938,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -825,7 +993,33 @@ class Threads(SyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1086,7 +1280,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
|
||||
@ -1146,11 +1366,6 @@ class AsyncThreads(AsyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1217,7 +1432,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1280,11 +1521,6 @@ class AsyncThreads(AsyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1347,7 +1583,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1410,11 +1672,6 @@ class AsyncThreads(AsyncAPIResource):
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
|
||||
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1476,7 +1733,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
|
||||
@ -1534,7 +1817,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1592,7 +1901,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1621,7 +1956,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
@ -1650,7 +2011,33 @@ class AsyncThreads(AsyncAPIResource):
|
||||
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
|
||||
metadata: Optional[object] | NotGiven = NOT_GIVEN,
|
||||
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
|
||||
model: Union[
|
||||
str,
|
||||
Literal[
|
||||
"gpt-4o",
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4-turbo",
|
||||
"gpt-4-turbo-2024-04-09",
|
||||
"gpt-4-0125-preview",
|
||||
"gpt-4-turbo-preview",
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo-0125",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
],
|
||||
None,
|
||||
]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
|
||||
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
|
||||
temperature: Optional[float] | NotGiven = NOT_GIVEN,
|
||||
|
@@ -2,12 +2,10 @@

from __future__ import annotations

import inspect
from typing import Dict, List, Union, Iterable, Optional, overload
from typing_extensions import Literal

import httpx
import pydantic

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
@@ -21,7 +19,9 @@ from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ...types.chat import completion_create_params
from ..._base_client import make_request_options
from ..._base_client import (
    make_request_options,
)
from ...types.chat_model import ChatModel
from ...types.chat.chat_completion import ChatCompletion
from ...types.chat.chat_completion_chunk import ChatCompletionChunk
@@ -144,16 +144,9 @@ class Completions(SyncAPIResource):
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

response_format: An object specifying the format that the model must output. Compatible with
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
[GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which guarantees the model will match your supplied JSON schema. Learn
more in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.

@@ -178,7 +171,6 @@ class Completions(SyncAPIResource):
exhausted.
- If set to 'default', the request will be processed using the default service
tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.

When this parameter is set, the response body will include the `service_tier`
utilized.
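As a hedged illustration of the JSON mode described above on chat.completions.create; the API also expects the prompt itself to instruct the model to produce JSON:

from openai import OpenAI

client = OpenAI()
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "Reply with a JSON object."},
        {"role": "user", "content": "List three primary colors."},
    ],
    response_format={"type": "json_object"},
)
print(completion.choices[0].message.content)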
@ -347,16 +339,9 @@ class Completions(SyncAPIResource):
|
||||
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
|
||||
|
||||
response_format: An object specifying the format that the model must output. Compatible with
|
||||
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
|
||||
[GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
|
||||
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -381,7 +366,6 @@ class Completions(SyncAPIResource):
|
||||
exhausted.
|
||||
- If set to 'default', the request will be processed using the default service
|
||||
tier with a lower uptime SLA and no latency guarantee.
|
||||
- When not set, the default behavior is 'auto'.
|
||||
|
||||
When this parameter is set, the response body will include the `service_tier`
|
||||
utilized.
|
||||
@ -543,16 +527,9 @@ class Completions(SyncAPIResource):
|
||||
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
|
||||
|
||||
response_format: An object specifying the format that the model must output. Compatible with
|
||||
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
|
||||
[GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
|
||||
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -577,7 +554,6 @@ class Completions(SyncAPIResource):
|
||||
exhausted.
|
||||
- If set to 'default', the request will be processed using the default service
|
||||
tier with a lower uptime SLA and no latency guarantee.
|
||||
- When not set, the default behavior is 'auto'.
|
||||
|
||||
When this parameter is set, the response body will include the `service_tier`
|
||||
utilized.
|
||||
@ -664,7 +640,6 @@ class Completions(SyncAPIResource):
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
||||
) -> ChatCompletion | Stream[ChatCompletionChunk]:
|
||||
validate_response_format(response_format)
|
||||
return self._post(
|
||||
"/chat/completions",
|
||||
body=maybe_transform(
|
||||
@ -815,16 +790,9 @@ class AsyncCompletions(AsyncAPIResource):
|
||||
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
|
||||
|
||||
response_format: An object specifying the format that the model must output. Compatible with
|
||||
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
|
||||
[GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
|
||||
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -849,7 +817,6 @@ class AsyncCompletions(AsyncAPIResource):
|
||||
exhausted.
|
||||
- If set to 'default', the request will be processed using the default service
|
||||
tier with a lower uptime SLA and no latency guarantee.
|
||||
- When not set, the default behavior is 'auto'.
|
||||
|
||||
When this parameter is set, the response body will include the `service_tier`
|
||||
utilized.
|
||||
@ -1018,16 +985,9 @@ class AsyncCompletions(AsyncAPIResource):
|
||||
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
|
||||
|
||||
response_format: An object specifying the format that the model must output. Compatible with
|
||||
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
|
||||
[GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
|
||||
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1052,7 +1012,6 @@ class AsyncCompletions(AsyncAPIResource):
|
||||
exhausted.
|
||||
- If set to 'default', the request will be processed using the default service
|
||||
tier with a lower uptime SLA and no latency guarantee.
|
||||
- When not set, the default behavior is 'auto'.
|
||||
|
||||
When this parameter is set, the response body will include the `service_tier`
|
||||
utilized.
|
||||
@ -1214,16 +1173,9 @@ class AsyncCompletions(AsyncAPIResource):
|
||||
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
|
||||
|
||||
response_format: An object specifying the format that the model must output. Compatible with
|
||||
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
|
||||
[GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
|
||||
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
|
||||
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
|
||||
|
||||
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
||||
Outputs which guarantees the model will match your supplied JSON schema. Learn
|
||||
more in the
|
||||
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
||||
|
||||
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
|
||||
message the model generates is valid JSON.
|
||||
|
||||
@ -1248,7 +1200,6 @@ class AsyncCompletions(AsyncAPIResource):
|
||||
exhausted.
|
||||
- If set to 'default', the request will be processed using the default service
|
||||
tier with a lower uptime SLA and no latency guarantee.
|
||||
- When not set, the default behavior is 'auto'.
|
||||
|
||||
When this parameter is set, the response body will include the `service_tier`
|
||||
utilized.
|
||||
@ -1335,7 +1286,6 @@ class AsyncCompletions(AsyncAPIResource):
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
||||
) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
|
||||
validate_response_format(response_format)
|
||||
return await self._post(
|
||||
"/chat/completions",
|
||||
body=await async_maybe_transform(
|
||||
@@ -1409,10 +1359,3 @@ class AsyncCompletionsWithStreamingResponse:
        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )


def validate_response_format(response_format: object) -> None:
    if inspect.isclass(response_format) and issubclass(response_format, pydantic.BaseModel):
        raise TypeError(
            "You tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `beta.chat.completions.parse()` instead"
        )

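validate_response_format is the guard behind that error message: a pydantic BaseModel class is rejected at create() and routed to the beta parse() helper instead. A sketch of the intended usage, assuming this SDK version still exposes the beta namespace:

import pydantic
from openai import OpenAI

class Answer(pydantic.BaseModel):
    city: str
    population: int

client = OpenAI()
# create(response_format=Answer) would raise the TypeError above;
# parse() accepts the model class and returns a parsed message.
completion = client.beta.chat.completions.parse(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Largest city in France, with population."}],
    response_format=Answer,
)
print(completion.choices[0].message.parsed)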
@@ -5,11 +5,12 @@ from __future__ import annotations
import time
import typing_extensions
from typing import Mapping, cast
from typing_extensions import Literal

import httpx

from .. import _legacy_response
from ..types import FilePurpose, file_list_params, file_create_params
from ..types import file_list_params, file_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from .._utils import (
    extract_files,
@@ -34,7 +35,6 @@ from .._base_client import (
)
from ..types.file_object import FileObject
from ..types.file_deleted import FileDeleted
from ..types.file_purpose import FilePurpose

__all__ = ["Files", "AsyncFiles"]

@@ -52,7 +52,7 @@ class Files(SyncAPIResource):
    self,
    *,
    file: FileTypes,
    purpose: FilePurpose,
    purpose: Literal["assistants", "batch", "fine-tune", "vision"],
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
@@ -334,7 +334,7 @@ class AsyncFiles(AsyncAPIResource):
    self,
    *,
    file: FileTypes,
    purpose: FilePurpose,
    purpose: Literal["assistants", "batch", "fine-tune", "vision"],
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,

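Either spelling of the purpose parameter (the FilePurpose alias or the inline Literal) accepts the same four strings. A minimal upload sketch (the local path is hypothetical):

from openai import OpenAI

client = OpenAI()
with open("training.jsonl", "rb") as f:  # hypothetical local file
    uploaded = client.files.create(file=f, purpose="fine-tune")
print(uploaded.id)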
@@ -52,7 +52,7 @@ class Jobs(SyncAPIResource):
def create(
    self,
    *,
    model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
    model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
    training_file: str,
    hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
    integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
@@ -77,7 +77,7 @@ class Jobs(SyncAPIResource):

Args:
  model: The name of the model to fine-tune. You can select one of the
    [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
    [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).

  training_file: The ID of an uploaded file that contains training data.

@@ -107,7 +107,7 @@ class Jobs(SyncAPIResource):
  name.

  For example, a `suffix` of "custom-model-name" would produce a model name like
  `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
  `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.

  validation_file: The ID of an uploaded file that contains validation data.

@@ -332,7 +332,7 @@ class AsyncJobs(AsyncAPIResource):
async def create(
    self,
    *,
    model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
    model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
    training_file: str,
    hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
    integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
@@ -357,7 +357,7 @@ class AsyncJobs(AsyncAPIResource):

Args:
  model: The name of the model to fine-tune. You can select one of the
    [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
    [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).

  training_file: The ID of an uploaded file that contains training data.

@@ -387,7 +387,7 @@ class AsyncJobs(AsyncAPIResource):
  name.

  For example, a `suffix` of "custom-model-name" would produce a model name like
  `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
  `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.

  validation_file: The ID of an uploaded file that contains validation data.

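With "gpt-4o-mini" dropped from the pinned Literal, a job created against this signature names one of the remaining base models. Sketch (the training-file ID is a hypothetical placeholder from a prior files.create call):

from openai import OpenAI

client = OpenAI()
job = client.fine_tuning.jobs.create(
    model="gpt-3.5-turbo",
    training_file="file-abc123",  # hypothetical uploaded-file ID
    suffix="custom-model-name",   # yields e.g. ft:gpt-3.5-turbo:openai:custom-model-name:...
)
print(job.id, job.status)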
@@ -19,8 +19,9 @@ from .._utils import (
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.image_model import ImageModel
from .._base_client import (
    make_request_options,
)
from ..types.images_response import ImagesResponse

__all__ = ["Images", "AsyncImages"]
@@ -39,7 +40,7 @@ class Images(SyncAPIResource):
    self,
    *,
    image: FileTypes,
    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
    size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
@@ -114,7 +115,7 @@ class Images(SyncAPIResource):
    image: FileTypes,
    prompt: str,
    mask: FileTypes | NotGiven = NOT_GIVEN,
    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
    size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
@@ -195,7 +196,7 @@ class Images(SyncAPIResource):
    self,
    *,
    prompt: str,
    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
@@ -285,7 +286,7 @@ class AsyncImages(AsyncAPIResource):
    self,
    *,
    image: FileTypes,
    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
    size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
@@ -360,7 +361,7 @@ class AsyncImages(AsyncAPIResource):
    image: FileTypes,
    prompt: str,
    mask: FileTypes | NotGiven = NOT_GIVEN,
    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
    size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
@@ -441,7 +442,7 @@ class AsyncImages(AsyncAPIResource):
    self,
    *,
    prompt: str,
    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,

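Here the ImageModel alias reverts to inline Literals: generate() keeps both DALL·E names, while edit and variation stay "dall-e-2" only. A hedged generate() sketch:

from openai import OpenAI

client = OpenAI()
image = client.images.generate(
    prompt="a watercolor fox in the snow",
    model="dall-e-3",
    size="1024x1024",
    quality="standard",
    response_format="url",
)
print(image.data[0].url)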
@@ -3,6 +3,7 @@
from __future__ import annotations

from typing import List, Union
from typing_extensions import Literal

import httpx

@@ -16,8 +17,9 @@ from .._utils import (
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.moderation_model import ModerationModel
from .._base_client import (
    make_request_options,
)
from ..types.moderation_create_response import ModerationCreateResponse

__all__ = ["Moderations", "AsyncModerations"]
@@ -36,7 +38,7 @@ class Moderations(SyncAPIResource):
    self,
    *,
    input: Union[str, List[str]],
    model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
@@ -96,7 +98,7 @@ class AsyncModerations(AsyncAPIResource):
    self,
    *,
    input: Union[str, List[str]],
    model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN,
    model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,

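The moderation model pin likewise reverts to the two text-moderation literals. Sketch:

from openai import OpenAI

client = OpenAI()
result = client.moderations.create(
    input="Some text to classify.",
    model="text-moderation-latest",
)
print(result.results[0].flagged)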
@@ -1,33 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .parts import (
    Parts,
    AsyncParts,
    PartsWithRawResponse,
    AsyncPartsWithRawResponse,
    PartsWithStreamingResponse,
    AsyncPartsWithStreamingResponse,
)
from .uploads import (
    Uploads,
    AsyncUploads,
    UploadsWithRawResponse,
    AsyncUploadsWithRawResponse,
    UploadsWithStreamingResponse,
    AsyncUploadsWithStreamingResponse,
)

__all__ = [
    "Parts",
    "AsyncParts",
    "PartsWithRawResponse",
    "AsyncPartsWithRawResponse",
    "PartsWithStreamingResponse",
    "AsyncPartsWithStreamingResponse",
    "Uploads",
    "AsyncUploads",
    "UploadsWithRawResponse",
    "AsyncUploadsWithRawResponse",
    "UploadsWithStreamingResponse",
    "AsyncUploadsWithStreamingResponse",
]

@@ -1,188 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Mapping, cast

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from ..._utils import (
    extract_files,
    maybe_transform,
    deepcopy_minimal,
    async_maybe_transform,
)
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.uploads import part_create_params
from ...types.uploads.upload_part import UploadPart

__all__ = ["Parts", "AsyncParts"]


class Parts(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> PartsWithRawResponse:
        return PartsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> PartsWithStreamingResponse:
        return PartsWithStreamingResponse(self)

    def create(
        self,
        upload_id: str,
        *,
        data: FileTypes,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> UploadPart:
        """
        Adds a
        [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.
        A Part represents a chunk of bytes from the file you are trying to upload.

        Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
        maximum of 8 GB.

        It is possible to add multiple Parts in parallel. You can decide the intended
        order of the Parts when you
        [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).

        Args:
          data: The chunk of bytes for this Part.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        body = deepcopy_minimal({"data": data})
        files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
        # It should be noted that the actual Content-Type header that will be
        # sent to the server will contain a `boundary` parameter, e.g.
        # multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            f"/uploads/{upload_id}/parts",
            body=maybe_transform(body, part_create_params.PartCreateParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=UploadPart,
        )


class AsyncParts(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncPartsWithRawResponse:
        return AsyncPartsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncPartsWithStreamingResponse:
        return AsyncPartsWithStreamingResponse(self)

    async def create(
        self,
        upload_id: str,
        *,
        data: FileTypes,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> UploadPart:
        """
        Adds a
        [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.
        A Part represents a chunk of bytes from the file you are trying to upload.

        Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
        maximum of 8 GB.

        It is possible to add multiple Parts in parallel. You can decide the intended
        order of the Parts when you
        [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).

        Args:
          data: The chunk of bytes for this Part.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        body = deepcopy_minimal({"data": data})
        files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
        # It should be noted that the actual Content-Type header that will be
        # sent to the server will contain a `boundary` parameter, e.g.
        # multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            f"/uploads/{upload_id}/parts",
            body=await async_maybe_transform(body, part_create_params.PartCreateParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=UploadPart,
        )


class PartsWithRawResponse:
    def __init__(self, parts: Parts) -> None:
        self._parts = parts

        self.create = _legacy_response.to_raw_response_wrapper(
            parts.create,
        )


class AsyncPartsWithRawResponse:
    def __init__(self, parts: AsyncParts) -> None:
        self._parts = parts

        self.create = _legacy_response.async_to_raw_response_wrapper(
            parts.create,
        )


class PartsWithStreamingResponse:
    def __init__(self, parts: Parts) -> None:
        self._parts = parts

        self.create = to_streamed_response_wrapper(
            parts.create,
        )


class AsyncPartsWithStreamingResponse:
    def __init__(self, parts: AsyncParts) -> None:
        self._parts = parts

        self.create = async_to_streamed_response_wrapper(
            parts.create,
        )
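Editor's note: the deleted `parts.py` above documents the chunked-upload protocol (each Part at most 64 MB, 8 GB per Upload, explicit Part ordering at completion). A sketch of driving that protocol by hand, under the assumption that you are on a newer SDK that still ships `client.uploads` (the file name and purpose are illustrative):

```py
import os

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set

path = "data.bin"  # hypothetical local file
upload = client.uploads.create(
    bytes=os.path.getsize(path),
    filename=os.path.basename(path),
    mime_type="application/octet-stream",
    purpose="batch",
)

part_ids = []
with open(path, "rb") as f:
    while chunk := f.read(64 * 1024 * 1024):  # Parts are capped at 64 MB each
        part = client.uploads.parts.create(upload_id=upload.id, data=chunk)
        part_ids.append(part.id)

# The ordered part_ids decide how the Parts are stitched together (8 GB max total).
client.uploads.complete(upload_id=upload.id, part_ids=part_ids)
```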
@@ -1,694 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

import io
import os
import logging
import builtins
from typing import List, overload
from pathlib import Path

import anyio
import httpx

from ... import _legacy_response
from .parts import (
    Parts,
    AsyncParts,
    PartsWithRawResponse,
    AsyncPartsWithRawResponse,
    PartsWithStreamingResponse,
    AsyncPartsWithStreamingResponse,
)
from ...types import FilePurpose, upload_create_params, upload_complete_params
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import (
    maybe_transform,
    async_maybe_transform,
)
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.upload import Upload
from ...types.file_purpose import FilePurpose

__all__ = ["Uploads", "AsyncUploads"]


# 64MB
DEFAULT_PART_SIZE = 64 * 1024 * 1024

log: logging.Logger = logging.getLogger(__name__)


class Uploads(SyncAPIResource):
    @cached_property
    def parts(self) -> Parts:
        return Parts(self._client)

    @cached_property
    def with_raw_response(self) -> UploadsWithRawResponse:
        return UploadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> UploadsWithStreamingResponse:
        return UploadsWithStreamingResponse(self)

    @overload
    def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str],
        mime_type: str,
        purpose: FilePurpose,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Splits a file into multiple 64MB parts and uploads them sequentially."""

    @overload
    def upload_file_chunked(
        self,
        *,
        file: bytes,
        filename: str,
        bytes: int,
        mime_type: str,
        purpose: FilePurpose,
        part_size: int | None = None,
        md5: str | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Splits an in-memory file into multiple 64MB parts and uploads them sequentially."""

    def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str] | bytes,
        mime_type: str,
        purpose: FilePurpose,
        filename: str | None = None,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Splits the given file into multiple parts and uploads them sequentially.

        ```py
        from pathlib import Path

        client.uploads.upload_file(
            file=Path("my-paper.pdf"),
            mime_type="pdf",
            purpose="assistants",
        )
        ```
        """
        if isinstance(file, builtins.bytes):
            if filename is None:
                raise TypeError("The `filename` argument must be given for in-memory files")

            if bytes is None:
                raise TypeError("The `bytes` argument must be given for in-memory files")
        else:
            if not isinstance(file, Path):
                file = Path(file)

            if not filename:
                filename = file.name

            if bytes is None:
                bytes = file.stat().st_size

        upload = self.create(
            bytes=bytes,
            filename=filename,
            mime_type=mime_type,
            purpose=purpose,
        )

        part_ids: list[str] = []

        if part_size is None:
            part_size = DEFAULT_PART_SIZE

        if isinstance(file, builtins.bytes):
            buf: io.FileIO | io.BytesIO = io.BytesIO(file)
        else:
            buf = io.FileIO(file)

        try:
            while True:
                data = buf.read(part_size)
                if not data:
                    # EOF
                    break

                part = self.parts.create(upload_id=upload.id, data=data)
                log.info("Uploaded part %s for upload %s", part.id, upload.id)
                part_ids.append(part.id)
        except Exception:
            buf.close()
            raise

        return self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5)

    def create(
        self,
        *,
        bytes: int,
        filename: str,
        mime_type: str,
        purpose: FilePurpose,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """
        Creates an intermediate
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object
        that you can add
        [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.
        Currently, an Upload can accept at most 8 GB in total and expires after an hour
        after you create it.

        Once you complete the Upload, we will create a
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        contains all the parts you uploaded. This File is usable in the rest of our
        platform as a regular File object.

        For certain `purpose`s, the correct `mime_type` must be specified. Please refer
        to documentation for the supported MIME types for your use case:

        - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files)

        For guidance on the proper filename extensions for each purpose, please follow
        the documentation on
        [creating a File](https://platform.openai.com/docs/api-reference/files/create).

        Args:
          bytes: The number of bytes in the file you are uploading.

          filename: The name of the file to upload.

          mime_type: The MIME type of the file.

              This must fall within the supported MIME types for your file purpose. See the
              supported MIME types for assistants and vision.

          purpose: The intended purpose of the uploaded file.

              See the
              [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/uploads",
            body=maybe_transform(
                {
                    "bytes": bytes,
                    "filename": filename,
                    "mime_type": mime_type,
                    "purpose": purpose,
                },
                upload_create_params.UploadCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    def cancel(
        self,
        upload_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Cancels the Upload.

        No Parts may be added after an Upload is cancelled.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return self._post(
            f"/uploads/{upload_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    def complete(
        self,
        upload_id: str,
        *,
        part_ids: List[str],
        md5: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """
        Completes the
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object).

        Within the returned Upload object, there is a nested
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        is ready to use in the rest of the platform.

        You can specify the order of the Parts by passing in an ordered list of the Part
        IDs.

        The number of bytes uploaded upon completion must match the number of bytes
        initially specified when creating the Upload object. No Parts may be added after
        an Upload is completed.

        Args:
          part_ids: The ordered list of Part IDs.

          md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
              matches what you expect.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return self._post(
            f"/uploads/{upload_id}/complete",
            body=maybe_transform(
                {
                    "part_ids": part_ids,
                    "md5": md5,
                },
                upload_complete_params.UploadCompleteParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )


class AsyncUploads(AsyncAPIResource):
    @cached_property
    def parts(self) -> AsyncParts:
        return AsyncParts(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncUploadsWithRawResponse:
        return AsyncUploadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncUploadsWithStreamingResponse:
        return AsyncUploadsWithStreamingResponse(self)

    @overload
    async def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str],
        mime_type: str,
        purpose: FilePurpose,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Splits a file into multiple 64MB parts and uploads them sequentially."""

    @overload
    async def upload_file_chunked(
        self,
        *,
        file: bytes,
        filename: str,
        bytes: int,
        mime_type: str,
        purpose: FilePurpose,
        part_size: int | None = None,
        md5: str | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Splits an in-memory file into multiple 64MB parts and uploads them sequentially."""

    async def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str] | bytes,
        mime_type: str,
        purpose: FilePurpose,
        filename: str | None = None,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Splits the given file into multiple parts and uploads them sequentially.

        ```py
        from pathlib import Path

        client.uploads.upload_file(
            file=Path("my-paper.pdf"),
            mime_type="pdf",
            purpose="assistants",
        )
        ```
        """
        if isinstance(file, builtins.bytes):
            if filename is None:
                raise TypeError("The `filename` argument must be given for in-memory files")

            if bytes is None:
                raise TypeError("The `bytes` argument must be given for in-memory files")
        else:
            if not isinstance(file, anyio.Path):
                file = anyio.Path(file)

            if not filename:
                filename = file.name

            if bytes is None:
                stat = await file.stat()
                bytes = stat.st_size

        upload = await self.create(
            bytes=bytes,
            filename=filename,
            mime_type=mime_type,
            purpose=purpose,
        )

        part_ids: list[str] = []

        if part_size is None:
            part_size = DEFAULT_PART_SIZE

        if isinstance(file, anyio.Path):
            fd = await file.open("rb")
            async with fd:
                while True:
                    data = await fd.read(part_size)
                    if not data:
                        # EOF
                        break

                    part = await self.parts.create(upload_id=upload.id, data=data)
                    log.info("Uploaded part %s for upload %s", part.id, upload.id)
                    part_ids.append(part.id)
        else:
            buf = io.BytesIO(file)

            try:
                while True:
                    data = buf.read(part_size)
                    if not data:
                        # EOF
                        break

                    part = await self.parts.create(upload_id=upload.id, data=data)
                    log.info("Uploaded part %s for upload %s", part.id, upload.id)
                    part_ids.append(part.id)
            except Exception:
                buf.close()
                raise

        return await self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5)

    async def create(
        self,
        *,
        bytes: int,
        filename: str,
        mime_type: str,
        purpose: FilePurpose,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """
        Creates an intermediate
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object
        that you can add
        [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.
        Currently, an Upload can accept at most 8 GB in total and expires after an hour
        after you create it.

        Once you complete the Upload, we will create a
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        contains all the parts you uploaded. This File is usable in the rest of our
        platform as a regular File object.

        For certain `purpose`s, the correct `mime_type` must be specified. Please refer
        to documentation for the supported MIME types for your use case:

        - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files)

        For guidance on the proper filename extensions for each purpose, please follow
        the documentation on
        [creating a File](https://platform.openai.com/docs/api-reference/files/create).

        Args:
          bytes: The number of bytes in the file you are uploading.

          filename: The name of the file to upload.

          mime_type: The MIME type of the file.

              This must fall within the supported MIME types for your file purpose. See the
              supported MIME types for assistants and vision.

          purpose: The intended purpose of the uploaded file.

              See the
              [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/uploads",
            body=await async_maybe_transform(
                {
                    "bytes": bytes,
                    "filename": filename,
                    "mime_type": mime_type,
                    "purpose": purpose,
                },
                upload_create_params.UploadCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    async def cancel(
        self,
        upload_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """Cancels the Upload.

        No Parts may be added after an Upload is cancelled.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return await self._post(
            f"/uploads/{upload_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    async def complete(
        self,
        upload_id: str,
        *,
        part_ids: List[str],
        md5: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Upload:
        """
        Completes the
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object).

        Within the returned Upload object, there is a nested
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        is ready to use in the rest of the platform.

        You can specify the order of the Parts by passing in an ordered list of the Part
        IDs.

        The number of bytes uploaded upon completion must match the number of bytes
        initially specified when creating the Upload object. No Parts may be added after
        an Upload is completed.

        Args:
          part_ids: The ordered list of Part IDs.

          md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
              matches what you expect.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return await self._post(
            f"/uploads/{upload_id}/complete",
            body=await async_maybe_transform(
                {
                    "part_ids": part_ids,
                    "md5": md5,
                },
                upload_complete_params.UploadCompleteParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )


class UploadsWithRawResponse:
    def __init__(self, uploads: Uploads) -> None:
        self._uploads = uploads

        self.create = _legacy_response.to_raw_response_wrapper(
            uploads.create,
        )
        self.cancel = _legacy_response.to_raw_response_wrapper(
            uploads.cancel,
        )
        self.complete = _legacy_response.to_raw_response_wrapper(
            uploads.complete,
        )

    @cached_property
    def parts(self) -> PartsWithRawResponse:
        return PartsWithRawResponse(self._uploads.parts)


class AsyncUploadsWithRawResponse:
    def __init__(self, uploads: AsyncUploads) -> None:
        self._uploads = uploads

        self.create = _legacy_response.async_to_raw_response_wrapper(
            uploads.create,
        )
        self.cancel = _legacy_response.async_to_raw_response_wrapper(
            uploads.cancel,
        )
        self.complete = _legacy_response.async_to_raw_response_wrapper(
            uploads.complete,
        )

    @cached_property
    def parts(self) -> AsyncPartsWithRawResponse:
        return AsyncPartsWithRawResponse(self._uploads.parts)


class UploadsWithStreamingResponse:
    def __init__(self, uploads: Uploads) -> None:
        self._uploads = uploads

        self.create = to_streamed_response_wrapper(
            uploads.create,
        )
        self.cancel = to_streamed_response_wrapper(
            uploads.cancel,
        )
        self.complete = to_streamed_response_wrapper(
            uploads.complete,
        )

    @cached_property
    def parts(self) -> PartsWithStreamingResponse:
        return PartsWithStreamingResponse(self._uploads.parts)


class AsyncUploadsWithStreamingResponse:
    def __init__(self, uploads: AsyncUploads) -> None:
        self._uploads = uploads

        self.create = async_to_streamed_response_wrapper(
            uploads.create,
        )
        self.cancel = async_to_streamed_response_wrapper(
            uploads.cancel,
        )
        self.complete = async_to_streamed_response_wrapper(
            uploads.complete,
        )

    @cached_property
    def parts(self) -> AsyncPartsWithStreamingResponse:
        return AsyncPartsWithStreamingResponse(self._uploads.parts)
@@ -9,36 +9,26 @@ from .shared import (
    ErrorObject as ErrorObject,
    FunctionDefinition as FunctionDefinition,
    FunctionParameters as FunctionParameters,
    ResponseFormatText as ResponseFormatText,
    ResponseFormatJSONObject as ResponseFormatJSONObject,
    ResponseFormatJSONSchema as ResponseFormatJSONSchema,
)
from .upload import Upload as Upload
from .embedding import Embedding as Embedding
from .chat_model import ChatModel as ChatModel
from .completion import Completion as Completion
from .moderation import Moderation as Moderation
from .audio_model import AudioModel as AudioModel
from .batch_error import BatchError as BatchError
from .file_object import FileObject as FileObject
from .image_model import ImageModel as ImageModel
from .file_content import FileContent as FileContent
from .file_deleted import FileDeleted as FileDeleted
from .file_purpose import FilePurpose as FilePurpose
from .model_deleted import ModelDeleted as ModelDeleted
from .images_response import ImagesResponse as ImagesResponse
from .completion_usage import CompletionUsage as CompletionUsage
from .file_list_params import FileListParams as FileListParams
from .moderation_model import ModerationModel as ModerationModel
from .batch_list_params import BatchListParams as BatchListParams
from .completion_choice import CompletionChoice as CompletionChoice
from .image_edit_params import ImageEditParams as ImageEditParams
from .file_create_params import FileCreateParams as FileCreateParams
from .batch_create_params import BatchCreateParams as BatchCreateParams
from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
from .upload_create_params import UploadCreateParams as UploadCreateParams
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
@@ -3,7 +3,6 @@
from .translation import Translation as Translation
from .speech_model import SpeechModel as SpeechModel
from .transcription import Transcription as Transcription
from .speech_create_params import SpeechCreateParams as SpeechCreateParams
from .translation_create_params import TranslationCreateParams as TranslationCreateParams
@@ -5,8 +5,6 @@ from __future__ import annotations
from typing import Union
from typing_extensions import Literal, Required, TypedDict

from .speech_model import SpeechModel

__all__ = ["SpeechCreateParams"]

@@ -14,7 +12,7 @@ class SpeechCreateParams(TypedDict, total=False):
    input: Required[str]
    """The text to generate audio for. The maximum length is 4096 characters."""

    model: Required[Union[str, SpeechModel]]
    model: Required[Union[str, Literal["tts-1", "tts-1-hd"]]]
    """
    One of the available [TTS models](https://platform.openai.com/docs/models/tts):
    `tts-1` or `tts-1-hd`
@@ -1,7 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal, TypeAlias

__all__ = ["SpeechModel"]

SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd"]
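Editor's note: deleting `speech_model.py` folds the TTS model choice back into the inline literal shown in the `speech_create_params.py` hunk above. A sketch of a call against the pinned stubs, assuming `OPENAI_API_KEY` is set; the voice name is an assumption, as voices are not part of this diff, and `write_to_file` is the 1.x binary-response helper, assumed available here:

```py
from openai import OpenAI

client = OpenAI()
speech = client.audio.speech.create(
    model="tts-1",  # or "tts-1-hd"; the only values the restored Literal allows
    voice="alloy",  # assumed voice name, not shown in this diff
    input="The package pin is in place.",
)
speech.write_to_file("speech.mp3")  # binary-response helper in the 1.x SDK
```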
@@ -1,6 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.


from ..._models import BaseModel

__all__ = ["Transcription"]
@@ -6,7 +6,6 @@ from typing import List, Union
from typing_extensions import Literal, Required, TypedDict

from ..._types import FileTypes
from ..audio_model import AudioModel

__all__ = ["TranscriptionCreateParams"]

@@ -18,7 +17,7 @@ class TranscriptionCreateParams(TypedDict, total=False):
    flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
    """

    model: Required[Union[str, AudioModel]]
    model: Required[Union[str, Literal["whisper-1"]]]
    """ID of the model to use.

    Only `whisper-1` (which is powered by our open source Whisper V2 model) is
@@ -1,6 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.


from ..._models import BaseModel

__all__ = ["Translation"]
@@ -3,10 +3,9 @@
from __future__ import annotations

from typing import Union
from typing_extensions import Required, TypedDict
from typing_extensions import Literal, Required, TypedDict

from ..._types import FileTypes
from ..audio_model import AudioModel

__all__ = ["TranslationCreateParams"]

@@ -18,7 +17,7 @@ class TranslationCreateParams(TypedDict, total=False):
    mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
    """

    model: Required[Union[str, AudioModel]]
    model: Required[Union[str, Literal["whisper-1"]]]
    """ID of the model to use.

    Only `whisper-1` (which is powered by our open source Whisper V2 model) is
@@ -1,7 +0,0 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal, TypeAlias

__all__ = ["AudioModel"]

AudioModel: TypeAlias = Literal["whisper-1"]
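Editor's note: with `audio_model.py` deleted, both transcription and translation params accept only the `"whisper-1"` literal (or a bare `str`). A minimal sketch, assuming `OPENAI_API_KEY` is set and a local recording exists:

```py
from openai import OpenAI

client = OpenAI()
with open("meeting.mp3", "rb") as audio:  # hypothetical local recording
    transcript = client.audio.transcriptions.create(model="whisper-1", file=audio)
print(transcript.text)
```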
@@ -1,6 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.


from .._models import BaseModel

__all__ = ["BatchRequestCounts"]
@@ -23,6 +23,7 @@ from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
from .assistant_response_format import AssistantResponseFormat as AssistantResponseFormat
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
@@ -30,6 +31,7 @@ from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
from .assistant_response_format_param import AssistantResponseFormatParam as AssistantResponseFormatParam
from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam
from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam
@@ -89,11 +89,6 @@ class Assistant(BaseModel):
    [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
    Outputs which guarantees the model will match your supplied JSON schema. Learn
    more in the
    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

    Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
    message the model generates is valid JSON.
@@ -3,9 +3,8 @@
from __future__ import annotations

from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from typing_extensions import Literal, Required, TypedDict

from ..chat_model import ChatModel
from .assistant_tool_param import AssistantToolParam
from .assistant_response_format_option_param import AssistantResponseFormatOptionParam

@@ -23,7 +22,33 @@ __all__ = [


class AssistantCreateParams(TypedDict, total=False):
    model: Required[Union[str, ChatModel]]
    model: Required[
        Union[
            str,
            Literal[
                "gpt-4o",
                "gpt-4o-2024-05-13",
                "gpt-4-turbo",
                "gpt-4-turbo-2024-04-09",
                "gpt-4-0125-preview",
                "gpt-4-turbo-preview",
                "gpt-4-1106-preview",
                "gpt-4-vision-preview",
                "gpt-4",
                "gpt-4-0314",
                "gpt-4-0613",
                "gpt-4-32k",
                "gpt-4-32k-0314",
                "gpt-4-32k-0613",
                "gpt-3.5-turbo",
                "gpt-3.5-turbo-16k",
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-0125",
                "gpt-3.5-turbo-16k-0613",
            ],
        ]
    ]
    """ID of the model to use.

    You can use the
@@ -60,11 +85,6 @@ class AssistantCreateParams(TypedDict, total=False):
    [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
    Outputs which guarantees the model will match your supplied JSON schema. Learn
    more in the
    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

    Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
    message the model generates is valid JSON.

@@ -145,7 +165,7 @@ class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
    """Always `static`."""


ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
ToolResourcesFileSearchVectorStoreChunkingStrategy = Union[
    ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
]
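Editor's note: the hunk above swaps the `ChatModel` alias for an inline literal list, so the pinned stubs only recognize the model names enumerated there (arbitrary strings still pass, since `str` remains in the Union). A minimal sketch, assuming `OPENAI_API_KEY` is set; the assistant name is illustrative:

```py
from openai import OpenAI

client = OpenAI()
assistant = client.beta.assistants.create(
    model="gpt-4o",   # must be `str` or one of the literals listed in the hunk above
    name="notes-bot",  # hypothetical assistant name
)
print(assistant.id)
```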
13
site-packages/openai/types/beta/assistant_response_format.py
Normal file
@@ -0,0 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["AssistantResponseFormat"]


class AssistantResponseFormat(BaseModel):
    type: Optional[Literal["text", "json_object"]] = None
    """Must be one of `text` or `json_object`."""
@@ -1,14 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
from typing_extensions import Literal, TypeAlias
from typing_extensions import Literal

from ..shared.response_format_text import ResponseFormatText
from ..shared.response_format_json_object import ResponseFormatJSONObject
from ..shared.response_format_json_schema import ResponseFormatJSONSchema
from .assistant_response_format import AssistantResponseFormat

__all__ = ["AssistantResponseFormatOption"]

AssistantResponseFormatOption: TypeAlias = Union[
    Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
]
AssistantResponseFormatOption = Union[Literal["none", "auto"], AssistantResponseFormat]
@@ -3,14 +3,10 @@
from __future__ import annotations

from typing import Union
from typing_extensions import Literal, TypeAlias
from typing_extensions import Literal

from ..shared_params.response_format_text import ResponseFormatText
from ..shared_params.response_format_json_object import ResponseFormatJSONObject
from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
from .assistant_response_format_param import AssistantResponseFormatParam

__all__ = ["AssistantResponseFormatOptionParam"]

AssistantResponseFormatOptionParam: TypeAlias = Union[
    Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
]
AssistantResponseFormatOptionParam = Union[Literal["none", "auto"], AssistantResponseFormatParam]
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, TypedDict

__all__ = ["AssistantResponseFormatParam"]


class AssistantResponseFormatParam(TypedDict, total=False):
    type: Literal["text", "json_object"]
    """Must be one of `text` or `json_object`."""
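Editor's note: the restored `AssistantResponseFormatParam` narrows assistant response formats back to `"text"`/`"json_object"` (plus `"none"`/`"auto"` in the option alias above) and drops the newer `json_schema` shape. A sketch of requesting JSON mode through the restored param shape; it assumes the same-era assistants API accepts `response_format` at creation time:

```py
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set
assistant = client.beta.assistants.create(
    model="gpt-4o",
    instructions="Reply with a JSON object.",
    response_format={"type": "json_object"},  # matches AssistantResponseFormatParam above
)
print(assistant.response_format)
```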
Some files were not shown because too many files have changed in this diff.