Sync up site-packages to devchat[main](3542049e)

Update-Packages Action 2024-05-14 08:51:42 +00:00
parent e60bfbc312
commit 314fce5cdd
57 changed files with 971 additions and 901 deletions
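Nearly every hunk below shows the same mechanical changes: single-quoted strings become double-quoted, imports are re-sorted, trailing commas are added, and long signatures and calls are exploded to one argument per line. That is the output style of black plus isort. A minimal driver that would reproduce it is sketched here for illustration only; no formatter configuration appears in this diff, and the 100-character line length is inferred from the longest reformatted lines:

import subprocess

# Hypothetical reformatting pass; the flags shown are standard black/isort options.
subprocess.run(["black", "--line-length", "100", "devchat/"], check=True)
subprocess.run(["isort", "--profile", "black", "--line-length", "100", "devchat/"], check=True)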

View File: RECORD

@@ -18,7 +18,7 @@ devchat/__pycache__/message.cpython-38.pyc,,
devchat/__pycache__/prompt.cpython-38.pyc,,
devchat/__pycache__/store.cpython-38.pyc,,
devchat/__pycache__/utils.cpython-38.pyc,,
devchat/_cli/__init__.py,sha256=QGJsPz9XTBrKHInAxe9ucokiHZQ-XLeXUriS6fNIWis,345
devchat/_cli/__init__.py,sha256=mSJu3LdXlzc1Iyk9jU7VC9Hk59NPRHJjkJ11bNyR97I,349
devchat/_cli/__pycache__/__init__.cpython-38.pyc,,
devchat/_cli/__pycache__/errors.cpython-38.pyc,,
devchat/_cli/__pycache__/log.cpython-38.pyc,,
@@ -29,21 +29,21 @@ devchat/_cli/__pycache__/router.cpython-38.pyc,,
devchat/_cli/__pycache__/run.cpython-38.pyc,,
devchat/_cli/__pycache__/topic.cpython-38.pyc,,
devchat/_cli/__pycache__/utils.cpython-38.pyc,,
devchat/_cli/errors.py,sha256=akl1b5EzZhAlhQyfcFNOSTaLmJ3zG5PSTVrRGPaJ1bg,58
devchat/_cli/log.py,sha256=QU7jXZC3FFEFH-_9KaHtMESiY8qGX4ocG8MKDpg0cSY,3278
devchat/_cli/main.py,sha256=1IFbGSs6PbmoRmL3r0-SwlsGaNyl0mQ1tpUgYRW8GuQ,582
devchat/_cli/prompt.py,sha256=PniymJQqNecYz02mrduYHXO5LeSjDxdraWyI_7dEtrE,2916
devchat/_cli/route.py,sha256=9YVnry0OpW4B_RUiRtXqpblOZfQoFmSH6Xs7hflzmRg,2490
devchat/_cli/router.py,sha256=XKzaSxQ5dVicVnoA-jqrecB-jXYDvIX82mVy5FlAWaU,6382
devchat/_cli/run.py,sha256=QAidaTAvVLPyqylROiGgd5y5370K4ylJvUORjZoziVo,6657
devchat/_cli/topic.py,sha256=Nxiir_0ArOQt377ywM8vxmEfGIGUfIRCiHSVD2z-nP8,1167
devchat/_cli/utils.py,sha256=ooepOTF-0mLN6sg5wcnnwE52WkWWayZBo7NepEFxveI,5460
devchat/anthropic/__init__.py,sha256=xaFR1uXxn0sVHBhCJdJKuWKVVgPnSLw3mlaCFFivD_8,97
devchat/_cli/errors.py,sha256=7RiLmM6Hp8UINRaMQEQDhvfEWQjvqMYQFfWk5tiU1MQ,56
devchat/_cli/log.py,sha256=DkBDNRsKNdLesTVyNyPe6a4nni34I7-qqhMd_tiar7c,3260
devchat/_cli/main.py,sha256=kNJgS628VIrd-o-svjxvpy27kbm0EJ3ESgk_0W-o0Gw,486
devchat/_cli/prompt.py,sha256=gnSL0_gnrCMGiauwGwVvYyVuWiih1RpwPrSbAcuoNXQ,2889
devchat/_cli/route.py,sha256=nX5xae6SOoae2R8losu2wqAFmpPcuwdMX5C7H7B_VYE,2386
devchat/_cli/router.py,sha256=N01EwQ30LOPqYngVqUMhsUv4AUTjIthyn7pi-M6I1eU,6491
devchat/_cli/run.py,sha256=blxGEws2TxnFvcbJgigNGwJoqgb-6T9-Vrvt0zkxu-w,6381
devchat/_cli/topic.py,sha256=WgRgCVwn3aX_gZm9JlLWFT0LKuiCSTnvMqzkiQH8B0s,1119
devchat/_cli/utils.py,sha256=NqquK-NHWuxiwUdSP_MHUjc7EqbYtTKMSQqQcSRobbY,5362
devchat/anthropic/__init__.py,sha256=FDPLNVL3UmINQbdCbVdQhJ2d9Ti6AkpZ3L2qm9rSwaU,91
devchat/anthropic/__pycache__/__init__.cpython-38.pyc,,
devchat/anthropic/__pycache__/anthropic_chat.cpython-38.pyc,,
devchat/anthropic/anthropic_chat.py,sha256=OujoXOQywPQf4gjLhdZBYTwKoRDs8hujktss3hN-BNk,423
devchat/assistant.py,sha256=LCBH5XaTiUpd3xGCdvkKD9Uyn8y7tf5aoArQTAo1CN4,6642
devchat/chat.py,sha256=TEO8OndmL4hpJ1D-QAFKO-JB_7w1kTeUC3VVwL9FSUQ,1676
devchat/anthropic/anthropic_chat.py,sha256=qQx-5kDn5uSe8LAM1IrY5ybQPS54pUSA1g3kv2Je5Sg,424
devchat/assistant.py,sha256=ucgq_VwrG2impluYhTaJdjxxuWKzv_YBTGGERtHHnk4,6690
devchat/chat.py,sha256=RM8AXyGN-MTQulaj0TBIT1NAEvL2y8eVnQGNX7xA_4Q,1677
devchat/chatmark/.gitignore,sha256=8wf0Azg8LJGtO3zamZ8sHM-ARFcedTCPK1disjofnhY,4
devchat/chatmark/README.md,sha256=akXLntx1ebzWaIqwt0hQ_8eVd79t-CQZ5hOynM20JLk,157
devchat/chatmark/__init__.py,sha256=l1xRneWsKKAWs0R4VoynYytFahCRgyvR-tbrhKK3iiE,203
@@ -54,13 +54,13 @@ devchat/chatmark/__pycache__/step.cpython-38.pyc,,
devchat/chatmark/__pycache__/widgets.cpython-38.pyc,,
devchat/chatmark/chatmark_example/README.md,sha256=vtSGvEL1IOQPu56qP5s6ZazW-41iNkS_Ph0GBjjWATA,413
devchat/chatmark/chatmark_example/__pycache__/main.cpython-38.pyc,,
devchat/chatmark/chatmark_example/main.py,sha256=JfAC7opkVIUdzrOfyVwb04FlX7whVFNyeWrf-_ZWC0A,3600
devchat/chatmark/form.py,sha256=bbPQhhyMDbrrs2bX8UmVKAZ6n4kcYJEppDD3700ksbM,2586
devchat/chatmark/chatmark_example/main.py,sha256=VvWhcpCd7t3Itcc2e07sRsj2RcMF9hUifxeEfGPPUt0,3575
devchat/chatmark/form.py,sha256=8azb4U3M2OddKEU0R6bdfrnmNlIhfAFIloj_CSjPD5k,2538
devchat/chatmark/iobase.py,sha256=CjTHjDAxHkLHiNsrp4aaTjdT6mQB5Dy4B1UsJWVcKS8,913
devchat/chatmark/step.py,sha256=jATqxc1ZoeKlkEoO-0DMoyVzLYGNA58S8hL5NMn6W7A,574
devchat/chatmark/widgets.py,sha256=5FEghZ-BZPHyjPUIKZ3L6BFhNHawa2JdPX6OzBs7Yfs,10159
devchat/config.py,sha256=JJxUfCH6fr40Ek9FawfX1jh6Z6GEzU_OD3GFXOXsjig,4474
devchat/engine/__init__.py,sha256=KSlnUY42kg9EyTaW2lrhdANWl1ORkg-5vYoLZfv9e8Q,373
devchat/config.py,sha256=0mrD0KCyq6kHEjgVT0xAbJxQbsety1ThTQyda4utkH0,4133
devchat/engine/__init__.py,sha256=3Iz-16ziWQ8BsclsWWjIE0zHr1ldfev5I_uiMBNOvwU,374
devchat/engine/__pycache__/__init__.cpython-38.pyc,,
devchat/engine/__pycache__/command_parser.cpython-38.pyc,,
devchat/engine/__pycache__/command_runner.cpython-38.pyc,,
@@ -68,24 +68,24 @@ devchat/engine/__pycache__/namespace.cpython-38.pyc,,
devchat/engine/__pycache__/recursive_prompter.cpython-38.pyc,,
devchat/engine/__pycache__/router.cpython-38.pyc,,
devchat/engine/__pycache__/util.cpython-38.pyc,,
devchat/engine/command_parser.py,sha256=jeJQe-_nEGhYF3mwVitkOefaFfiW9YwYCLJFooMXQLE,1694
devchat/engine/command_runner.py,sha256=o5d8L_f_0qLSkkT-E5Wosnzn2XJQ_qqJbPm_znBzW0A,9855
devchat/engine/namespace.py,sha256=MghROybwfVYhfKz8efeG38awQR4eXUThwuVL9J07MGc,5175
devchat/engine/recursive_prompter.py,sha256=Fknj_uuLcPB60tsp7V8GM_1YUXN8XHbKU1MCi66JwyM,1558
devchat/engine/router.py,sha256=1Rerwmp8POq9_G-nG_nMLiuGeWkTlh9KKv_nLKcneVk,1740
devchat/engine/util.py,sha256=7PFTBBLkUG5O2xVdH043p0am4rCBSEJoOBSGk__4vps,5943
devchat/ide/__init__.py,sha256=55h2mDn2z6y49uJUjNymD0Jg8YaxlMt-YaJKI4S6QlU,138
devchat/engine/command_parser.py,sha256=irCjyyVxLcNfHUWXnkSWzp0LHNoBNuFGrfqBm7u92rU,1694
devchat/engine/command_runner.py,sha256=sfZY3K-TDWMrB4u5PYU8t2hg8YWsciz_cI01susnAfE,9463
devchat/engine/namespace.py,sha256=AfKDg5LKVTSGr7_6MxclEjCG7eWrqjQPcm3W_PulC2Q,5158
devchat/engine/recursive_prompter.py,sha256=1SXHqqWSRjm9Dr21590KhoqHPjSYkDG1-wQt784_Ybw,1560
devchat/engine/router.py,sha256=cGEWkFlnjdaxwUKu442C9lD6seEpPbSYiYD5SoqH9Sg,1708
devchat/engine/util.py,sha256=3q7at6Vm7lTWyC_tzQ5sNiPC2m6sUcqaW9-0kG3B5Po,5776
devchat/ide/__init__.py,sha256=VaPxmh12S3oUdrZHcHHYOpQEAqOlWcPJRaOncKG5jnA,152
devchat/ide/__pycache__/__init__.cpython-38.pyc,,
devchat/ide/__pycache__/idea_services.cpython-38.pyc,,
devchat/ide/__pycache__/rpc.cpython-38.pyc,,
devchat/ide/__pycache__/service.cpython-38.pyc,,
devchat/ide/__pycache__/types.cpython-38.pyc,,
devchat/ide/__pycache__/vscode_services.cpython-38.pyc,,
devchat/ide/idea_services.py,sha256=OB3xQVf4kCS_9Gn9-GsqLqFfS4l-QNPmmp6kgd3iuVY,394
devchat/ide/rpc.py,sha256=4Ahe-s46RE35CWUto9H_EdiYm6gjY2x3yzAmHZ7-g8g,2464
devchat/ide/service.py,sha256=-kRitl7rDuD98UlIFG9JVKVK0t4lys6r7-nlDY-BBQ4,4955
devchat/ide/types.py,sha256=7GKd0zhK_oFhP-wQDXEbNzTHxO1J2rgQ2H8GtYZlOdE,1113
devchat/ide/vscode_services.py,sha256=L_F5PyGsPuR4Femt1xPEh-znLDB5sTZpAyNQc6tijv4,5264
devchat/ide/idea_services.py,sha256=KUwxmn2K9dYO7d5cmD7lTzEMGH-E4gTEKy9xwoQDAhM,393
devchat/ide/rpc.py,sha256=ag3hBLBuM4ehFE6bvfHbrMR1nMrjkxt3sJ9alHyTL4k,2344
devchat/ide/service.py,sha256=19uwObr7PdFK1VTjYU8AIeeKKCy5JnspQjI0WEFJEnY,4791
devchat/ide/types.py,sha256=HdSl_6Eu_8miq1GWzjir0MIvLFFIwtn6Yt5QF15fLWQ,1091
devchat/ide/vscode_services.py,sha256=nd6SceGju21dRCgb4XVWeK6tsBkGqDmtMggjbI9NtQM,5200
devchat/llm/__init__.py,sha256=IXhLbfNO-TV2ZIJwZOhjsL2Batb8WGq-gayrxp-z8a0,409
devchat/llm/__pycache__/__init__.cpython-38.pyc,,
devchat/llm/__pycache__/chat.cpython-38.pyc,,
@@ -93,32 +93,32 @@ devchat/llm/__pycache__/openai.cpython-38.pyc,,
devchat/llm/__pycache__/pipeline.cpython-38.pyc,,
devchat/llm/__pycache__/text_confirm.cpython-38.pyc,,
devchat/llm/__pycache__/tools_call.cpython-38.pyc,,
devchat/llm/chat.py,sha256=Hs_3qZRDIjekLQwPTj2RzCmidJbJ5b1pazsDmQJe27k,3445
devchat/llm/openai.py,sha256=mG3V97sltmLUIVqvV33eO5aYjre3YHeQLt6GgjIIhQk,6237
devchat/llm/pipeline.py,sha256=hVtwEfKVZ1S90Qb9SLe3UBRJZbtLPptgCEk8JHaEpGI,2002
devchat/llm/chat.py,sha256=XWXUIpbWCMiuMCtBchrQpBpuyLwjga5KcCFzDoapbcc,3377
devchat/llm/openai.py,sha256=8hK2OByDRq8sPgROf-UvVPA8Oz0lSDfMaAFSKh0D644,6208
devchat/llm/pipeline.py,sha256=D214HASOUA7DsUm63_QDVFTYsHShPrrBwTbd0hM3tRI,1920
devchat/llm/text_confirm.py,sha256=sdt7AUFDcsOZ0fLfS0vtjdS2_8xhkTF6aF8Sn05OlI0,1462
devchat/llm/tools_call.py,sha256=Ks156bm_kkp6Sb3PP7Ci1cR4Gqf1pkye4oG5chd_rSg,8072
devchat/memory/__init__.py,sha256=BD2vKfSmWsQrOJSKKXKBwcVcGQcZamglWZDstosn4cw,134
devchat/llm/tools_call.py,sha256=OBObtFAzuqEJPq7Ro9hR4oirrcMtxGchlMQl8vL1CBc,8038
devchat/memory/__init__.py,sha256=aPR0Dt8dcf4oWXu2HME2fFSpDJDeoBayPWMFOpO8v5k,133
devchat/memory/__pycache__/__init__.cpython-38.pyc,,
devchat/memory/__pycache__/base.cpython-38.pyc,,
devchat/memory/__pycache__/fixsize_memory.cpython-38.pyc,,
devchat/memory/base.py,sha256=mabEkWtez31gMtRamQkEMUGX6yEzb3P0uHFEgK1IBhI,598
devchat/memory/fixsize_memory.py,sha256=iPBeylq8UayOepds6qrvVQf46pd8vMcrPO6brx_g-Po,1618
devchat/message.py,sha256=OdFQ8rv4ZrX-wOrLb4KRdqfvyAvCbaAMhDBXDHWuwRU,758
devchat/openai/__init__.py,sha256=9DcELA9I86vSQIySgABiZSb_QgE4qaT5s3n-ODASqiA,283
devchat/message.py,sha256=oJr_KXlAfzGEFHB-SAN4iy4KMr7CdSB9VKUIOhdULCY,759
devchat/openai/__init__.py,sha256=3da58vM6OExDCzC6VIvIWz80FwTDBT5I-UbALQC7R2Q,284
devchat/openai/__pycache__/__init__.cpython-38.pyc,,
devchat/openai/__pycache__/http_openai.cpython-38.pyc,,
devchat/openai/__pycache__/openai_chat.cpython-38.pyc,,
devchat/openai/__pycache__/openai_message.cpython-38.pyc,,
devchat/openai/__pycache__/openai_prompt.cpython-38.pyc,,
devchat/openai/http_openai.py,sha256=YH4tyNLq6ODmz9DCKcSOuTu6L4deV0dWrEEMGlxL1Rw,2653
devchat/openai/openai_chat.py,sha256=7SDYyKtjlwGIMDlv2ovfDEDNWkqsPu_GnAQ9_84XbqU,5185
devchat/openai/openai_message.py,sha256=xTmglsj5Iyvcytn3pUYhwkuiyJSx932N88fS4OCJ7Qk,3293
devchat/openai/openai_prompt.py,sha256=ENh3XHOZlWnONwfw_7r6So7KJg_ihXx-qqpO7DOXdhw,10766
devchat/prompt.py,sha256=CRFvl6x5Fs2CmaAghY4Bo05LKr6DeuYJe5Ut6w-lh_Y,9411
devchat/store.py,sha256=wswzzPLlD7ievAsqsPiZo4NZek2lEZkPRJnu3QiEveE,12056
devchat/openai/http_openai.py,sha256=5hEc1zRxhguia6vE3gpt1VrCJ6eyJuDe1xNIr5DRr8o,2695
devchat/openai/openai_chat.py,sha256=GbkLzNIgWAHFCVbVsvz5BOUlNu58MayR1ax6lNYyqpc,5095
devchat/openai/openai_message.py,sha256=HHBSLVxgEFpoiX47Ao4jtQj31X0LQ4CKgo9sLHPpxxY,3310
devchat/openai/openai_prompt.py,sha256=vg4hj6MChn4Dhcxq3wLLZ6jAL2b-BpeZaa9R2KQN28k,10663
devchat/prompt.py,sha256=ZMdyRE4WIidg46_S3-VwpZ6D52bVYpP5ANhzvSJyY8c,9492
devchat/store.py,sha256=-KHp0N9F0t2VjAVXcANJa6tusNZQnd8d5vY4tuPzIFA,11954
devchat/tiktoken_cache/9b5ad71b2ce5302211f9c61530b329a4922fc6a4,sha256=Ijkht27pm96ZW3_3OFE-7xAPtR0YyTWXoRO8_-hlsqc,1681126
devchat/utils.py,sha256=5EY6IyasRKc1LQ2gFE0or7GxhJqtetGUVb5SAeqes_E,7802
devchat/utils.py,sha256=GdI7DovvEzeG6oKnn8R-siGeW0myfQ76X1tDAhEKELQ,7650
devchat/workflow/README.md,sha256=a48aJE5X8vjgasI9lhJljlUjwLvn7vO7tlG0PFM67m0,201
devchat/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
devchat/workflow/__pycache__/__init__.cpython-38.pyc,,
@@ -131,23 +131,23 @@ devchat/workflow/__pycache__/schema.cpython-38.pyc,,
devchat/workflow/__pycache__/step.cpython-38.pyc,,
devchat/workflow/__pycache__/user_setting.cpython-38.pyc,,
devchat/workflow/__pycache__/workflow.cpython-38.pyc,,
devchat/workflow/cli.py,sha256=RQyzg1mQm-sVNyGQM-4tgAUbruwTpMLC6nLy0GB53fw,461
devchat/workflow/cli.py,sha256=V2kef1wO-BpWuAUPoqCXS2t9twdArXinNCy0QwE9vyM,462
devchat/workflow/command/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
devchat/workflow/command/__pycache__/__init__.cpython-38.pyc,,
devchat/workflow/command/__pycache__/config.cpython-38.pyc,,
devchat/workflow/command/__pycache__/env.cpython-38.pyc,,
devchat/workflow/command/__pycache__/list.cpython-38.pyc,,
devchat/workflow/command/__pycache__/update.cpython-38.pyc,,
devchat/workflow/command/config.py,sha256=4O22ebz-cx_CiKpEM7NnlbW02rb_OZk3vV_JKHMUPmk,713
devchat/workflow/command/env.py,sha256=lTcWns9UnEKIRtr1wh9f1gVqBMPN5Tjr6f-r2d3LOKI,2367
devchat/workflow/command/list.py,sha256=sgX9DQ68VvoYEMndBoqa7mEep4aaSG--mWOS6iapWJ0,3384
devchat/workflow/command/update.py,sha256=CnJE3P4RIAHrDv3_QPi47u0kGaZvP08el7C-lzLoxOU,9820
devchat/workflow/command/config.py,sha256=BH2ufIwMBbwWv-P3v8n_kKQQb55tXKGRq4eoFHxyc2s,712
devchat/workflow/command/env.py,sha256=wXZc497GwSjWk8T37krTkxqyjUhWSAh0c0RCwmLzRw8,2301
devchat/workflow/command/list.py,sha256=wCsfRZYLgM-CC3JWh7my42TiFgk2xdW_n4qNq2EPA3w,3378
devchat/workflow/command/update.py,sha256=bsgtL9mt_jCBW-LrbjGpxSQqsCJpogPIa2E5fPMK3Es,9730
devchat/workflow/command/update_flowchart.md,sha256=TrtZtK7K04yihUlIOvI10UCBTmZIumaRjRJJxkn85ow,1737
devchat/workflow/env_manager.py,sha256=6A_bZZ1JkpPERQ_QFII8SBMcQDmQylBH3r2vv8wNtZw,5568
devchat/workflow/envs.py,sha256=7vf71OG5JOlx8IGomjwDfOXPoRlJc11MmEOd4r4O5OY,297
devchat/workflow/namespace.py,sha256=WBDL_S-Lt2xW6tvkhY9cIb6G_pwcJ3v5-tYhsPvPusI,1611
devchat/workflow/env_manager.py,sha256=jxTrHP8Ki6N16tSZClKTWlVcM2KoO93r7ZWr4Is6LjQ,5434
devchat/workflow/envs.py,sha256=-lVTLjWRMrb8RGVVlHgWKCiGZaojNdmycjHFT0ZKjEo,298
devchat/workflow/namespace.py,sha256=zSYrz2tTwxGNfewqyWFNKN7vOeL2xxtmLRq0Pi1dx0Q,1613
devchat/workflow/path.py,sha256=ldTOXJmff7vP3YjFVo_0Awu2fTxZmAQOXlhD-_v7EkM,1110
devchat/workflow/schema.py,sha256=XIDZ6uqejeXVSGNJBc0uqDMs2YGvQF8RgCxfP_R6NYQ,1746
devchat/workflow/step.py,sha256=0q7-sH_n3Gn1cqmq-8TJqr_lUXWd0JIwWOOyKixwYiw,5805
devchat/workflow/user_setting.py,sha256=oPKLobDH36_kcQT5RAp5Oa0ZKw4cjXPzLn2zLOvdzvI,621
devchat/workflow/workflow.py,sha256=7Pk6RORqmrE4gvF70ESgw5veUHNW9axAT0TKVp4MRg0,7677
devchat/workflow/schema.py,sha256=FFqF0qJzr5gP9X3KefcA630kBKonC-Mn0TeyJGuXeVE,1674
devchat/workflow/step.py,sha256=zs1k0LtjtBa4pD5hZCzw_ubDXv7osq9WAicEWtLALF4,5725
devchat/workflow/user_setting.py,sha256=4OiV5s3RuO84fiJPdR_QY4bOJYP6g_68hkrdVQUVahQ,624
devchat/workflow/workflow.py,sha256=yzhwdXa81-xqt_oSa29hS3lhVWXMFlYdmL1Iiz-LQFA,7624
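For reading the hunks above: each RECORD line is path,sha256=<digest>,size, where the digest is the unpadded URL-safe base64 SHA-256 of the installed file and the size is in bytes (PEP 376/427); __pycache__ entries end in ,, because generated files are listed without a hash. A short sketch of verifying one entry (the site-packages root is illustrative):

import base64
import hashlib
import os

def verify_record_entry(site_packages: str, line: str) -> bool:
    # One RECORD line: "path,sha256=<urlsafe base64, no padding>,<size in bytes>".
    path, hash_field, size_field = line.rsplit(",", 2)
    if not hash_field:  # e.g. "...cpython-38.pyc,," entries carry no hash or size
        return True
    with open(os.path.join(site_packages, path), "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return hash_field == f"sha256={digest}" and len(data) == int(size_field)

# verify_record_entry("site-packages", "devchat/_cli/errors.py,sha256=7RiLmM6Hp8UINRaMQEQDhvfEWQjvqMYQFfWk5tiU1MQ,56")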

View File: devchat/_cli/__init__.py

@@ -1,17 +1,18 @@
import os
from .log import log
from .prompt import prompt
from .route import route
from .run import run
from .topic import topic
from .route import route
script_dir = os.path.dirname(os.path.realpath(__file__))
os.environ['TIKTOKEN_CACHE_DIR'] = os.path.join(script_dir, '..', 'tiktoken_cache')
os.environ["TIKTOKEN_CACHE_DIR"] = os.path.join(script_dir, "..", "tiktoken_cache")
__all__ = [
'log',
'prompt',
'run',
'topic',
'route',
"log",
"prompt",
"run",
"topic",
"route",
]
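The TIKTOKEN_CACHE_DIR assignment above points tiktoken at the BPE file shipped in the package (the devchat/tiktoken_cache/9b5ad71b… entry in the RECORD), so nothing is downloaded at import time. Assuming tiktoken's usual caching scheme, the cached file is named after the SHA-1 of the encoding's download URL; that can be checked as follows (the URL shown is the public cl100k_base blob, an assumption to verify against your tiktoken version):

import hashlib

url = "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken"
# Expected to print the bundled file name: 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
print(hashlib.sha1(url.encode()).hexdigest())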

View File: devchat/_cli/errors.py

@@ -1,4 +1,2 @@
class MissContentInPromptException(Exception):
pass

View File: devchat/_cli/log.py

@@ -1,13 +1,12 @@
# pylint: disable=import-outside-toplevel
import json
import sys
import time
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import click
@dataclass
class PromptData:
model: str = "none"
@@ -19,28 +18,34 @@ class PromptData:
response_tokens: int = 0
@click.command(help='Process logs')
@click.option('--skip', default=0, help='Skip number prompts before showing the prompt history.')
@click.option('-n', '--max-count', default=1, help='Limit the number of commits to output.')
@click.option('-t', '--topic', 'topic_root', default=None,
help='Hash of the root prompt of the topic to select prompts from.')
@click.option('--insert', default=None, help='JSON string of the prompt to insert into the log.')
@click.option('--delete', default=None, help='Hash of the leaf prompt to delete from the log.')
@click.command(help="Process logs")
@click.option("--skip", default=0, help="Skip number prompts before showing the prompt history.")
@click.option("-n", "--max-count", default=1, help="Limit the number of commits to output.")
@click.option(
"-t",
"--topic",
"topic_root",
default=None,
help="Hash of the root prompt of the topic to select prompts from.",
)
@click.option("--insert", default=None, help="JSON string of the prompt to insert into the log.")
@click.option("--delete", default=None, help="Hash of the leaf prompt to delete from the log.")
def log(skip, max_count, topic_root, insert, delete):
"""
Manage the prompt history.
"""
from devchat._cli.utils import get_model_config, handle_errors, init_dir
from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig, OpenAIPrompt
from devchat.store import Store
from devchat._cli.utils import handle_errors, init_dir, get_model_config
from devchat.utils import get_logger, get_user_info
logger = get_logger(__name__)
if (insert or delete) and (skip != 0 or max_count != 1 or topic_root is not None):
print("Error: The --insert or --delete option cannot be used with other options.",
file=sys.stderr)
print(
"Error: The --insert or --delete option cannot be used with other options.",
file=sys.stderr,
)
sys.exit(1)
repo_chat_dir, user_chat_dir = init_dir()

View File: devchat/_cli/main.py

@@ -1,15 +1,11 @@
"""
This module contains the main function for the DevChat CLI.
"""
import click
from devchat._cli import log, prompt, route, run, topic
from devchat.utils import get_logger
from devchat._cli import log
from devchat._cli import prompt
from devchat._cli import run
from devchat._cli import topic
from devchat._cli import route
from devchat.workflow.cli import workflow
logger = get_logger(__name__)

View File: devchat/_cli/prompt.py

@@ -1,33 +1,61 @@
# pylint: disable=import-outside-toplevel
import sys
from typing import List, Optional
import click
@click.command(help='Interact with the large language model (LLM).')
@click.argument('content', required=False)
@click.option('-p', '--parent', help='Input the parent prompt hash to continue the conversation.')
@click.option('-r', '--reference', multiple=True,
help='Input one or more specific previous prompts to include in the current prompt.')
@click.option('-i', '--instruct', multiple=True,
help='Add one or more files to the prompt as instructions.')
@click.option('-c', '--context', multiple=True,
help='Add one or more files to the prompt as a context.')
@click.option('-m', '--model', help='Specify the model to use for the prompt.')
@click.option('--config', 'config_str',
help='Specify a JSON string to overwrite the default configuration for this prompt.')
@click.option('-f', '--functions', type=click.Path(exists=True),
help='Path to a JSON file with functions for the prompt.')
@click.option('-n', '--function-name',
help='Specify the function name when the content is the output of a function.')
@click.option('-ns', '--not-store', is_flag=True, default=False, required=False,
help='Do not save the conversation to the store.')
def prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
functions: Optional[str] = None, function_name: Optional[str] = None,
not_store: Optional[bool] = False):
@click.command(help="Interact with the large language model (LLM).")
@click.argument("content", required=False)
@click.option("-p", "--parent", help="Input the parent prompt hash to continue the conversation.")
@click.option(
"-r",
"--reference",
multiple=True,
help="Input one or more specific previous prompts to include in the current prompt.",
)
@click.option(
"-i", "--instruct", multiple=True, help="Add one or more files to the prompt as instructions."
)
@click.option(
"-c", "--context", multiple=True, help="Add one or more files to the prompt as a context."
)
@click.option("-m", "--model", help="Specify the model to use for the prompt.")
@click.option(
"--config",
"config_str",
help="Specify a JSON string to overwrite the default configuration for this prompt.",
)
@click.option(
"-f",
"--functions",
type=click.Path(exists=True),
help="Path to a JSON file with functions for the prompt.",
)
@click.option(
"-n",
"--function-name",
help="Specify the function name when the content is the output of a function.",
)
@click.option(
"-ns",
"--not-store",
is_flag=True,
default=False,
required=False,
help="Do not save the conversation to the store.",
)
def prompt(
content: Optional[str],
parent: Optional[str],
reference: Optional[List[str]],
instruct: Optional[List[str]],
context: Optional[List[str]],
model: Optional[str],
config_str: Optional[str] = None,
functions: Optional[str] = None,
function_name: Optional[str] = None,
not_store: Optional[bool] = False,
):
"""
This command performs interactions with the specified large language model (LLM)
by sending prompts and receiving responses.
@@ -62,6 +90,7 @@ def prompt(content: Optional[str], parent: Optional[str], reference: Optional[Li
"""
from devchat._cli.router import llm_prompt
llm_prompt(
content,
parent,
@@ -72,5 +101,6 @@ def prompt(content: Optional[str], parent: Optional[str], reference: Optional[Li
config_str,
functions,
function_name,
not_store)
not_store,
)
sys.exit(0)

View File: devchat/_cli/route.py

@@ -1,28 +1,48 @@
# pylint: disable=import-outside-toplevel
import sys
from typing import List, Optional
import click
@click.command(help='Route a prompt to the specified LLM')
@click.argument('content', required=False)
@click.option('-p', '--parent', help='Input the parent prompt hash to continue the conversation.')
@click.option('-r', '--reference', multiple=True,
help='Input one or more specific previous prompts to include in the current prompt.')
@click.option('-i', '--instruct', multiple=True,
help='Add one or more files to the prompt as instructions.')
@click.option('-c', '--context', multiple=True,
help='Add one or more files to the prompt as a context.')
@click.option('-m', '--model', help='Specify the model to use for the prompt.')
@click.option('--config', 'config_str',
help='Specify a JSON string to overwrite the default configuration for this prompt.')
@click.option('-a', '--auto', is_flag=True, default=False, required=False,
help='Answer question by function-calling.')
def route(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
auto: Optional[bool] = False):
@click.command(help="Route a prompt to the specified LLM")
@click.argument("content", required=False)
@click.option("-p", "--parent", help="Input the parent prompt hash to continue the conversation.")
@click.option(
"-r",
"--reference",
multiple=True,
help="Input one or more specific previous prompts to include in the current prompt.",
)
@click.option(
"-i", "--instruct", multiple=True, help="Add one or more files to the prompt as instructions."
)
@click.option(
"-c", "--context", multiple=True, help="Add one or more files to the prompt as a context."
)
@click.option("-m", "--model", help="Specify the model to use for the prompt.")
@click.option(
"--config",
"config_str",
help="Specify a JSON string to overwrite the default configuration for this prompt.",
)
@click.option(
"-a",
"--auto",
is_flag=True,
default=False,
required=False,
help="Answer question by function-calling.",
)
def route(
content: Optional[str],
parent: Optional[str],
reference: Optional[List[str]],
instruct: Optional[List[str]],
context: Optional[List[str]],
model: Optional[str],
config_str: Optional[str] = None,
auto: Optional[bool] = False,
):
"""
This command performs interactions with the specified large language model (LLM)
by sending prompts and receiving responses.
@@ -58,14 +78,5 @@ def route(content: Optional[str], parent: Optional[str], reference: Optional[Lis
"""
from devchat._cli.router import llm_route
llm_route(
content,
parent,
reference,
instruct,
context,
model,
config_str,
auto
)
llm_route(content, parent, reference, instruct, context, model, config_str, auto)
sys.exit(0)

View File: devchat/_cli/router.py

@@ -1,4 +1,3 @@
# pylint: disable=import-outside-toplevel
import json
import sys
from typing import List, Optional
@@ -6,10 +5,8 @@ from typing import List, Optional
from devchat.workflow.workflow import Workflow
def _get_model_and_config(
model: Optional[str],
config_str: Optional[str]):
from devchat._cli.utils import init_dir, get_model_config
def _get_model_and_config(model: Optional[str], config_str: Optional[str]):
from devchat._cli.utils import get_model_config, init_dir
_1, user_chat_dir = init_dir()
model, config = get_model_config(user_chat_dir, model)
@@ -20,15 +17,17 @@ def _get_model_and_config(
parameters_data.update(config_data)
return model, parameters_data
def _load_tool_functions(functions: Optional[str]):
try:
if functions:
with open(functions, 'r', encoding="utf-8") as f_file:
with open(functions, "r", encoding="utf-8") as f_file:
return json.load(f_file)
return None
except Exception:
return None
def _load_instruction_contents(content: str, instruct: Optional[List[str]]):
from devchat.engine import load_workflow_instruction
from devchat.utils import parse_files
@@ -41,24 +40,31 @@ def _load_instruction_contents(content: str, instruct: Optional[List[str]]):
return instruct_contents
def before_prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
functions: Optional[str] = None, function_name: Optional[str] = None,
not_store: Optional[bool] = False):
def before_prompt(
content: Optional[str],
parent: Optional[str],
reference: Optional[List[str]],
instruct: Optional[List[str]],
context: Optional[List[str]],
model: Optional[str],
config_str: Optional[str] = None,
functions: Optional[str] = None,
function_name: Optional[str] = None,
not_store: Optional[bool] = False,
):
from devchat._cli.errors import MissContentInPromptException
from devchat._cli.utils import init_dir
from devchat.assistant import Assistant
from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
from devchat.store import Store
from devchat.utils import parse_files
from devchat._cli.utils import init_dir
from devchat._cli.errors import MissContentInPromptException
repo_chat_dir, _1 = init_dir()
if content is None:
content = sys.stdin.read()
if content == '':
if content == "":
raise MissContentInPromptException()
instruct_contents = _load_instruction_contents(content, instruct)
@@ -74,53 +80,80 @@ def before_prompt(content: Optional[str], parent: Optional[str], reference: Opti
assistant = Assistant(chat, chat_store, max_input_tokens, not not_store)
assistant.make_prompt(
request = content,
instruct_contents = instruct_contents,
context_contents = context_contents,
functions = tool_functions,
request=content,
instruct_contents=instruct_contents,
context_contents=context_contents,
functions=tool_functions,
parent=parent,
references=reference,
function_name=function_name
function_name=function_name,
)
return model, assistant, content
def llm_prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
functions: Optional[str] = None, function_name: Optional[str] = None,
not_store: Optional[bool] = False):
def llm_prompt(
content: Optional[str],
parent: Optional[str],
reference: Optional[List[str]],
instruct: Optional[List[str]],
context: Optional[List[str]],
model: Optional[str],
config_str: Optional[str] = None,
functions: Optional[str] = None,
function_name: Optional[str] = None,
not_store: Optional[bool] = False,
):
from devchat._cli.utils import handle_errors
with handle_errors():
_1, assistant, _3, = before_prompt(
content, parent, reference, instruct, context,
model, config_str, functions, function_name, not_store
)
(
_1,
assistant,
_3,
) = before_prompt(
content,
parent,
reference,
instruct,
context,
model,
config_str,
functions,
function_name,
not_store,
)
print(assistant.prompt.formatted_header())
for response in assistant.iterate_response():
print(response, end='', flush=True)
print(response, end="", flush=True)
def llm_commmand(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None):
from devchat.engine import run_command
def llm_commmand(
content: Optional[str],
parent: Optional[str],
reference: Optional[List[str]],
instruct: Optional[List[str]],
context: Optional[List[str]],
model: Optional[str],
config_str: Optional[str] = None,
):
from devchat._cli.utils import handle_errors
from devchat.engine import run_command
with handle_errors():
model, assistant, content = before_prompt(
content, parent, reference, instruct, context, model, config_str, None, None, True
)
)
print(assistant.prompt.formatted_header())
command_result = run_command(
model_name = model,
history_messages = assistant.prompt.messages,
input_text = content,
parent_hash = parent,
auto_fun = False)
model_name=model,
history_messages=assistant.prompt.messages,
input_text=content,
parent_hash=parent,
auto_fun=False,
)
if command_result is not None:
sys.exit(0)
@@ -129,17 +162,23 @@ def llm_commmand(content: Optional[str], parent: Optional[str], reference: Optio
sys.exit(-1)
def llm_route(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
auto: Optional[bool] = False):
from devchat.engine import run_command
def llm_route(
content: Optional[str],
parent: Optional[str],
reference: Optional[List[str]],
instruct: Optional[List[str]],
context: Optional[List[str]],
model: Optional[str],
config_str: Optional[str] = None,
auto: Optional[bool] = False,
):
from devchat._cli.utils import handle_errors
from devchat.engine import run_command
with handle_errors():
model, assistant, content = before_prompt(
content, parent, reference, instruct, context, model, config_str, None, None, True
)
)
name, user_input = Workflow.parse_trigger(content)
workflow = Workflow.load(name) if name else None
@@ -165,13 +204,14 @@ def llm_route(content: Optional[str], parent: Optional[str], reference: Optional
print(assistant.prompt.formatted_header())
command_result = run_command(
model_name = model,
history_messages = assistant.prompt.messages,
input_text = content,
parent_hash = parent,
auto_fun = auto)
model_name=model,
history_messages=assistant.prompt.messages,
input_text=content,
parent_hash=parent,
auto_fun=auto,
)
if command_result is not None:
sys.exit(command_result[0])
for response in assistant.iterate_response():
print(response, end='', flush=True)
print(response, end="", flush=True)

View File: devchat/_cli/run.py

@@ -1,49 +1,82 @@
# pylint: disable=import-outside-toplevel
from typing import List, Optional, Tuple
import click
@click.command(
help="The 'command' argument is the name of the command to run or get information about.")
@click.argument('command', required=False, default='')
@click.option('--list', 'list_flag', is_flag=True, default=False,
help='List all specified commands in JSON format.')
@click.option('--recursive', '-r', 'recursive_flag', is_flag=True, default=True,
help='List commands recursively.')
@click.option('--update-sys', 'update_sys_flag', is_flag=True, default=False,
help='Pull the `sys` command directory from the DevChat repository.')
@click.option('-p', '--parent', help='Input the parent prompt hash to continue the conversation.')
@click.option('-r', '--reference', multiple=True,
help='Input one or more specific previous prompts to include in the current prompt.')
@click.option('-i', '--instruct', multiple=True,
help='Add one or more files to the prompt as instructions.')
@click.option('-c', '--context', multiple=True,
help='Add one or more files to the prompt as a context.')
@click.option('-m', '--model', help='Specify the model to use for the prompt.')
@click.option('--config', 'config_str',
help='Specify a JSON string to overwrite the default configuration for this prompt.')
# pylint: disable=redefined-outer-name
def run(command: str, list_flag: bool, recursive_flag: bool, update_sys_flag: bool,
parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None):
help="The 'command' argument is the name of the command to run or get information about."
)
@click.argument("command", required=False, default="")
@click.option(
"--list",
"list_flag",
is_flag=True,
default=False,
help="List all specified commands in JSON format.",
)
@click.option(
"--recursive",
"-r",
"recursive_flag",
is_flag=True,
default=True,
help="List commands recursively.",
)
@click.option(
"--update-sys",
"update_sys_flag",
is_flag=True,
default=False,
help="Pull the `sys` command directory from the DevChat repository.",
)
@click.option("-p", "--parent", help="Input the parent prompt hash to continue the conversation.")
@click.option(
"-r",
"--reference",
multiple=True,
help="Input one or more specific previous prompts to include in the current prompt.",
)
@click.option(
"-i", "--instruct", multiple=True, help="Add one or more files to the prompt as instructions."
)
@click.option(
"-c", "--context", multiple=True, help="Add one or more files to the prompt as a context."
)
@click.option("-m", "--model", help="Specify the model to use for the prompt.")
@click.option(
"--config",
"config_str",
help="Specify a JSON string to overwrite the default configuration for this prompt.",
)
def run(
command: str,
list_flag: bool,
recursive_flag: bool,
update_sys_flag: bool,
parent: Optional[str],
reference: Optional[List[str]],
instruct: Optional[List[str]],
context: Optional[List[str]],
model: Optional[str],
config_str: Optional[str] = None,
):
"""
Operate the workflow engine of DevChat.
"""
import json
import os
import sys
from devchat._cli.utils import init_dir, handle_errors
from devchat.engine import Namespace, CommandParser
from devchat.utils import get_logger
from devchat._cli.router import llm_commmand
from devchat._cli.utils import handle_errors, init_dir
from devchat.engine import CommandParser, Namespace
from devchat.utils import get_logger
logger = get_logger(__name__)
_, user_chat_dir = init_dir()
with handle_errors():
workflows_dir = os.path.join(user_chat_dir, 'workflows')
workflows_dir = os.path.join(user_chat_dir, "workflows")
if not os.path.exists(workflows_dir):
os.makedirs(workflows_dir)
if not os.path.isdir(workflows_dir):
@@ -54,14 +87,14 @@ def run(command: str, list_flag: bool, recursive_flag: bool, update_sys_flag: bo
commander = CommandParser(namespace)
if update_sys_flag:
sys_dir = os.path.join(workflows_dir, 'sys')
sys_dir = os.path.join(workflows_dir, "sys")
git_urls = [
('https://gitlab.com/devchat-ai/workflows.git', 'main'),
('https://github.com/devchat-ai/workflows.git', 'main')
("https://gitlab.com/devchat-ai/workflows.git", "main"),
("https://github.com/devchat-ai/workflows.git", "main"),
]
zip_urls = [
'https://gitlab.com/devchat-ai/workflows/-/archive/main/workflows-main.zip',
'https://codeload.github.com/devchat-ai/workflows/zip/refs/heads/main'
"https://gitlab.com/devchat-ai/workflows/-/archive/main/workflows-main.zip",
"https://codeload.github.com/devchat-ai/workflows/zip/refs/heads/main",
]
_clone_or_pull_git_repo(sys_dir, git_urls, zip_urls)
return
@@ -73,26 +106,15 @@ def run(command: str, list_flag: bool, recursive_flag: bool, update_sys_flag: bo
if not cmd:
logger.warning("Existing command directory failed to parse: %s", name)
continue
commands.append({
'name': name,
'description': cmd.description,
'path': cmd.path
})
commands.append({"name": name, "description": cmd.description, "path": cmd.path})
print(json.dumps(commands, indent=2))
return
if command:
llm_commmand(
command,
parent,
reference,
instruct,
context,
model,
config_str
)
llm_commmand(command, parent, reference, instruct, context, model, config_str)
return
def __onerror(func, path, _1):
"""
Error handler for shutil.rmtree.
@@ -114,18 +136,21 @@ def __onerror(func, path, _1):
# Retry the function that failed
func(path)
def __make_files_writable(directory):
"""
Recursively make all files in the directory writable.
"""
import os
import stat
for root, _1, files in os.walk(directory):
for name in files:
filepath = os.path.join(root, name)
if not os.access(filepath, os.W_OK):
os.chmod(filepath, stat.S_IWUSR)
def _clone_or_pull_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]], zip_urls: List[str]):
"""
Clone a Git repository to a specified location, or pull it if it already exists.
@@ -135,13 +160,13 @@ def _clone_or_pull_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]], z
"""
import os
import shutil
from devchat._cli.utils import clone_git_repo, download_and_extract_workflow
from devchat.utils import get_logger
from devchat._cli.utils import download_and_extract_workflow
from devchat._cli.utils import clone_git_repo
logger = get_logger(__name__)
if shutil.which('git') is None:
if shutil.which("git") is None:
# If Git is not installed, download and extract the workflow
for url in zip_urls:
try:
@@ -152,13 +177,13 @@ def _clone_or_pull_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]], z
return
if os.path.exists(target_dir):
bak_dir = target_dir + '_bak'
new_dir = target_dir + '_old'
bak_dir = target_dir + "_bak"
new_dir = target_dir + "_old"
if os.path.exists(new_dir):
shutil.rmtree(new_dir, onerror=__onerror)
if os.path.exists(bak_dir):
shutil.rmtree(bak_dir, onerror=__onerror)
print(f'{target_dir} already exists. Moved to {new_dir}')
print(f"{target_dir} already exists. Moved to {new_dir}")
clone_git_repo(bak_dir, repo_urls)
try:
shutil.move(target_dir, new_dir)
@@ -173,4 +198,4 @@ def _clone_or_pull_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]], z
else:
clone_git_repo(target_dir, repo_urls)
print(f'Updated {target_dir}')
print(f"Updated {target_dir}")

View File: devchat/_cli/topic.py

@@ -1,19 +1,21 @@
# pylint: disable=import-outside-toplevel
import click
@click.command(help='Manage topics')
@click.option('--list', '-l', 'list_topics', is_flag=True,
help='List topics in reverse chronological order.')
@click.option('--skip', default=0, help='Skip number of topics before showing the list.')
@click.option('-n', '--max-count', default=100, help='Limit the number of topics to output.')
@click.command(help="Manage topics")
@click.option(
"--list", "-l", "list_topics", is_flag=True, help="List topics in reverse chronological order."
)
@click.option("--skip", default=0, help="Skip number of topics before showing the list.")
@click.option("-n", "--max-count", default=100, help="Limit the number of topics to output.")
def topic(list_topics: bool, skip: int, max_count: int):
"""
Manage topics.
"""
import json
from devchat._cli.utils import get_model_config, handle_errors, init_dir
from devchat.openai import OpenAIChat, OpenAIChatConfig
from devchat.store import Store
from devchat.openai import OpenAIChatConfig, OpenAIChat
from devchat._cli.utils import init_dir, handle_errors, get_model_config
repo_chat_dir, user_chat_dir = init_dir()

View File: devchat/_cli/utils.py

@@ -1,32 +1,32 @@
# pylint: disable=import-outside-toplevel
from contextlib import contextmanager
import os
import sys
import shutil
from typing import Tuple, List, Optional, Any
import sys
import zipfile
from contextlib import contextmanager
from typing import Any, List, Optional, Tuple
from devchat._cli.errors import MissContentInPromptException
from devchat.utils import find_root_dir, add_gitignore, setup_logger, get_logger
from devchat.utils import add_gitignore, find_root_dir, get_logger, setup_logger
logger = get_logger(__name__)
def download_and_extract_workflow(workflow_url, target_dir):
import requests
# Download the workflow zip file
response = requests.get(workflow_url, stream=True, timeout=10)
# Download file to temp dir
os.makedirs(target_dir, exist_ok=True)
zip_path = os.path.join(target_dir, 'workflow.zip')
with open(zip_path, 'wb') as file_handle:
zip_path = os.path.join(target_dir, "workflow.zip")
with open(zip_path, "wb") as file_handle:
for chunk in response.iter_content(chunk_size=8192):
if chunk:
file_handle.write(chunk)
# Extract the zip file
parent_dir = os.path.dirname(target_dir)
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall(parent_dir)
# Delete target directory if exists
@@ -34,7 +34,7 @@ def download_and_extract_workflow(workflow_url, target_dir):
shutil.rmtree(target_dir)
# Rename extracted directory to target directory
extracted_dir = os.path.join(parent_dir, 'workflows-main')
extracted_dir = os.path.join(parent_dir, "workflows-main")
os.rename(extracted_dir, target_dir)
@@ -58,9 +58,11 @@ def handle_errors():
print(f"{type(error).__name__}: {error}", file=sys.stderr)
sys.exit(1)
REPO_CHAT_DIR = None
USER_CHAT_DIR = None
def init_dir() -> Tuple[str, str]:
"""
Initialize the chat directories.
@@ -69,7 +71,6 @@ def init_dir() -> Tuple[str, str]:
REPO_CHAT_DIR: The chat directory in the repository.
USER_CHAT_DIR: The chat directory in the user's home.
"""
# pylint: disable=global-statement
global REPO_CHAT_DIR
global USER_CHAT_DIR
if REPO_CHAT_DIR and USER_CHAT_DIR:
@@ -108,8 +109,8 @@
sys.exit(1)
try:
setup_logger(os.path.join(REPO_CHAT_DIR, 'error.log'))
add_gitignore(REPO_CHAT_DIR, '*')
setup_logger(os.path.join(REPO_CHAT_DIR, "error.log"))
add_gitignore(REPO_CHAT_DIR, "*")
except Exception as exc:
logger.error("Failed to setup logger or add .gitignore: %s", exc)
@@ -125,7 +126,7 @@ def valid_git_repo(target_dir: str, valid_urls: List[str]) -> bool:
:return: True if the directory is a valid Git repository with a valid URL, False otherwise.
"""
try:
from git import Repo, InvalidGitRepositoryError
from git import InvalidGitRepositoryError, Repo
except Exception:
pass
@@ -148,7 +149,7 @@ def clone_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]]):
:param repo_urls: A list of possible Git repository URLs.
"""
try:
from git import Repo, GitCommandError
from git import GitCommandError, Repo
except Exception:
pass
@@ -164,8 +165,8 @@ def clone_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]]):
raise GitCommandError(f"Failed to clone repository to {target_dir}")
def get_model_config(user_chat_dir: str,
model: Optional[str] = None) -> Tuple[str, Any]:
def get_model_config(user_chat_dir: str, model: Optional[str] = None) -> Tuple[str, Any]:
from devchat.config import ConfigManager
manager = ConfigManager(user_chat_dir)
return manager.model_config(model)

View File: devchat/anthropic/__init__.py

@@ -1,5 +1,3 @@
from .anthropic_chat import AnthropicChatParameters
__all__ = [
'AnthropicChatParameters'
]
__all__ = ["AnthropicChatParameters"]

View File: devchat/anthropic/anthropic_chat.py

@@ -1,8 +1,9 @@
from typing import List, Optional, Dict, Any
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class AnthropicChatParameters(BaseModel, extra='ignore'):
class AnthropicChatParameters(BaseModel, extra="ignore"):
max_tokens_to_sample: int = Field(1024, ge=1)
stop_sequences: Optional[List[str]]
temperature: Optional[float] = Field(0.2, ge=0, le=1)

View File

@@ -1,15 +1,14 @@
import json
import sys
import time
from typing import Optional, List, Iterator
from typing import Iterator, List, Optional
from devchat.message import Message
from devchat.chat import Chat
from devchat.message import Message
from devchat.openai.openai_prompt import OpenAIPrompt
from devchat.store import Store
from devchat.utils import get_logger
logger = get_logger(__name__)
@@ -37,14 +36,20 @@ class Assistant:
def _check_limit(self):
if self._prompt.request_tokens > self.token_limit:
raise ValueError(f"Prompt tokens {self._prompt.request_tokens} "
f"beyond limit {self.token_limit}.")
raise ValueError(
f"Prompt tokens {self._prompt.request_tokens} " f"beyond limit {self.token_limit}."
)
def make_prompt(self, request: str,
instruct_contents: Optional[List[str]], context_contents: Optional[List[str]],
functions: Optional[List[dict]],
parent: Optional[str] = None, references: Optional[List[str]] = None,
function_name: Optional[str] = None):
def make_prompt(
self,
request: str,
instruct_contents: Optional[List[str]],
context_contents: Optional[List[str]],
functions: Optional[List[dict]],
parent: Optional[str] = None,
references: Optional[List[str]] = None,
function_name: Optional[str] = None,
):
"""
Make a prompt for the chat API.
@@ -59,7 +64,7 @@ class Assistant:
self._check_limit()
# Add instructions to the prompt
if instruct_contents:
combined_instruct = ''.join(instruct_contents)
combined_instruct = "".join(instruct_contents)
self._prompt.append_new(Message.INSTRUCT, combined_instruct)
self._check_limit()
# Add context to the prompt
@@ -77,8 +82,9 @@ class Assistant:
for reference_hash in references:
prompt = self._store.get_prompt(reference_hash)
if not prompt:
logger.error("Reference %s not retrievable while making prompt.",
reference_hash)
logger.error(
"Reference %s not retrievable while making prompt.", reference_hash
)
continue
self._prompt.references.append(reference_hash)
self._prompt.prepend_history(prompt, self.token_limit)
@@ -111,8 +117,10 @@ class Assistant:
try:
if hasattr(chunk, "dict"):
chunk = chunk.dict()
if "function_call" in chunk["choices"][0]["delta"] and \
not chunk["choices"][0]["delta"]["function_call"]:
if (
"function_call" in chunk["choices"][0]["delta"]
and not chunk["choices"][0]["delta"]["function_call"]
):
del chunk["choices"][0]["delta"]["function_call"]
if not chunk["choices"][0]["delta"]["content"]:
chunk["choices"][0]["delta"]["content"] = ""
@@ -123,8 +131,8 @@ class Assistant:
chunk["model"] = config_params["model"]
chunk["choices"][0]["index"] = 0
chunk["choices"][0]["finish_reason"] = "stop"
if "role" not in chunk['choices'][0]['delta']:
chunk['choices'][0]['delta']['role']='assistant'
if "role" not in chunk["choices"][0]["delta"]:
chunk["choices"][0]["delta"]["role"] = "assistant"
delta = self._prompt.append_response(json.dumps(chunk))
yield delta
@@ -136,9 +144,9 @@ class Assistant:
raise RuntimeError("No responses returned from the chat API")
if self._need_store:
self._store.store_prompt(self._prompt)
yield self._prompt.formatted_footer(0) + '\n'
yield self._prompt.formatted_footer(0) + "\n"
for index in range(1, len(self._prompt.responses)):
yield self._prompt.formatted_full_response(index) + '\n'
yield self._prompt.formatted_full_response(index) + "\n"
else:
response_str = self._chat.complete_response(self._prompt)
self._prompt.set_response(response_str)
@@ -147,4 +155,4 @@ class Assistant:
if self._need_store:
self._store.store_prompt(self._prompt)
for index in range(len(self._prompt.responses)):
yield self._prompt.formatted_full_response(index) + '\n'
yield self._prompt.formatted_full_response(index) + "\n"

View File: devchat/chat.py

@@ -1,5 +1,6 @@
from abc import ABC, abstractmethod
from typing import Iterator
from devchat.prompt import Prompt

View File: devchat/chatmark/chatmark_example/main.py

@@ -1,7 +1,6 @@
import time
from devchat.chatmark import Button, Checkbox, Form, Radio, Step, TextEditor # pylint: disable=E402
from devchat.chatmark import Button, Checkbox, Form, Radio, Step, TextEditor
def main():

View File: devchat/chatmark/form.py

@@ -1,5 +1,3 @@
# pylint: disable=C0103
# pylint: disable=W0212
from typing import Dict, List, Optional, Union
from .iobase import pipe_interaction

View File: devchat/config.py

@@ -1,18 +1,21 @@
import os
import sys
from typing import List, Dict, Tuple, Optional
from pydantic import BaseModel
from typing import Dict, List, Optional, Tuple
import oyaml as yaml
from pydantic import BaseModel
class GeneralProviderConfig(BaseModel):
api_key: Optional[str]
api_base: Optional[str]
class ModelConfig(BaseModel):
max_input_tokens: Optional[int] = sys.maxsize
provider: Optional[str]
class GeneralModelConfig(ModelConfig):
max_tokens: Optional[int]
stop_sequences: Optional[List[str]]
@@ -30,7 +33,7 @@ class ChatConfig(BaseModel):
class ConfigManager:
def __init__(self, dir_path: str):
self.config_path = os.path.join(dir_path, 'config.yml')
self.config_path = os.path.join(dir_path, "config.yml")
if not os.path.exists(self.config_path):
self._create_sample_file()
self._file_is_new = True
@@ -47,14 +50,14 @@ class ConfigManager:
return os.path.getmtime(self.config_path)
def _load_and_validate_config(self) -> ChatConfig:
with open(self.config_path, 'r', encoding='utf-8') as file:
with open(self.config_path, "r", encoding="utf-8") as file:
data = yaml.safe_load(file)
if 'providers' in data:
for provider, config in data['providers'].items():
data['providers'][provider] = GeneralProviderConfig(**config)
for model, config in data['models'].items():
data['models'][model] = GeneralModelConfig(**config)
if "providers" in data:
for provider, config in data["providers"].items():
data["providers"][provider] = GeneralProviderConfig(**config)
for model, config in data["models"].items():
data["models"][model] = GeneralModelConfig(**config)
return ChatConfig(**data)
@@ -70,9 +73,7 @@ class ConfigManager:
return model_id, self.config.models[model_id]
def update_model_config(
self,
model_id: str,
new_config: GeneralModelConfig
self, model_id: str, new_config: GeneralModelConfig
) -> GeneralModelConfig:
_, old_config = self.model_config(model_id)
if new_config.max_input_tokens is not None:
@@ -83,46 +84,29 @@ class ConfigManager:
return self.config.models[model_id]
def sync(self):
with open(self.config_path, 'w', encoding='utf-8') as file:
with open(self.config_path, "w", encoding="utf-8") as file:
yaml.dump(self.config.dict(exclude_unset=True), file)
def _create_sample_file(self):
sample_config = ChatConfig(
providers={
"devchat.ai": GeneralProviderConfig(
api_key=""
),
"openai.com": GeneralProviderConfig(
api_key=""
),
"general": GeneralProviderConfig(
)
"devchat.ai": GeneralProviderConfig(api_key=""),
"openai.com": GeneralProviderConfig(api_key=""),
"general": GeneralProviderConfig(),
},
models={
"gpt-4": GeneralModelConfig(
max_input_tokens=6000,
provider='devchat.ai',
temperature=0,
stream=True
max_input_tokens=6000, provider="devchat.ai", temperature=0, stream=True
),
"gpt-3.5-turbo-16k": GeneralModelConfig(
max_input_tokens=12000,
provider='devchat.ai',
temperature=0,
stream=True
max_input_tokens=12000, provider="devchat.ai", temperature=0, stream=True
),
"gpt-3.5-turbo": GeneralModelConfig(
max_input_tokens=3000,
provider='devchat.ai',
temperature=0,
stream=True
max_input_tokens=3000, provider="devchat.ai", temperature=0, stream=True
),
"claude-2": GeneralModelConfig(
provider='general',
max_tokens=20000
)
"claude-2": GeneralModelConfig(provider="general", max_tokens=20000),
},
default_model="gpt-3.5-turbo"
default_model="gpt-3.5-turbo",
)
with open(self.config_path, 'w', encoding='utf-8') as file:
with open(self.config_path, "w", encoding="utf-8") as file:
yaml.dump(sample_config.dict(exclude_unset=True), file)
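_create_sample_file above dumps the ChatConfig with exclude_unset=True, so the generated config.yml holds only the sample providers, the four models, and default_model. A minimal usage sketch of the resulting manager (the directory path is illustrative, and it assumes model_config(None) falls back to default_model, which is not shown in this hunk):

from devchat.config import ConfigManager, GeneralModelConfig

manager = ConfigManager("/tmp/devchat-home")   # writes the sample config.yml if none exists
model_id, config = manager.model_config(None)  # -> "gpt-3.5-turbo", max_input_tokens=3000
manager.update_model_config("gpt-4", GeneralModelConfig(max_input_tokens=8000))
manager.sync()  # rewrites config.yml with the merged settings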

View File: devchat/engine/__init__.py

@@ -1,14 +1,14 @@
from .command_parser import parse_command, Command, CommandParser
from .command_parser import Command, CommandParser, parse_command
from .namespace import Namespace
from .recursive_prompter import RecursivePrompter
from .router import run_command, load_workflow_instruction
from .router import load_workflow_instruction, run_command
__all__ = [
'parse_command',
'Command',
'CommandParser',
'Namespace',
'RecursivePrompter',
'run_command',
'load_workflow_instruction'
"parse_command",
"Command",
"CommandParser",
"Namespace",
"RecursivePrompter",
"run_command",
"load_workflow_instruction",
]

View File: devchat/engine/command_parser.py

@@ -1,7 +1,9 @@
import os
from typing import List, Dict, Optional
from pydantic import BaseModel
from typing import Dict, List, Optional
import oyaml as yaml
from pydantic import BaseModel
from .namespace import Namespace
@@ -21,7 +23,7 @@ class Command(BaseModel):
path: Optional[str] = None
class CommandParser():
class CommandParser:
def __init__(self, namespace: Namespace):
self.namespace = namespace
@@ -32,7 +34,7 @@ class CommandParser():
:param name: The command name in the namespace.
:return: The JSON representation of the command.
"""
file_path = self.namespace.get_file(name, 'command.yml')
file_path = self.namespace.get_file(name, "command.yml")
if not file_path:
return None
return parse_command(file_path)
@@ -48,9 +50,9 @@ def parse_command(file_path: str) -> Command:
# get path from file_path, /xx1/xx2/xx3.py => /xx1/xx2
config_dir = os.path.dirname(file_path)
with open(file_path, 'r', encoding='utf-8') as file:
with open(file_path, "r", encoding="utf-8") as file:
# replace {curpath} with config_dir
content = file.read().replace('$command_path', config_dir.replace('\\', '/'))
content = file.read().replace("$command_path", config_dir.replace("\\", "/"))
config_dict = yaml.safe_load(content)
config = Command(**config_dict)
config.path = file_path

View File: devchat/engine/command_runner.py

@@ -1,37 +1,39 @@
"""
Run Command with an input text.
"""
import os
import sys
import json
import threading
import subprocess
from typing import List, Dict
import os
import shlex
import subprocess
import sys
import threading
from typing import Dict, List
from devchat.utils import get_logger
from .command_parser import Command
from .util import ToolUtil
logger = get_logger(__name__)
DEVCHAT_COMMAND_MISS_ERROR_MESSAGE = (
'devchat-commands environment is not installed yet. '
'Please install it before using the current command.'
'The devchat-command environment is automatically '
'installed after the plugin starts,'
' and details can be viewed in the output window.'
"devchat-commands environment is not installed yet. "
"Please install it before using the current command."
"The devchat-command environment is automatically "
"installed after the plugin starts,"
" and details can be viewed in the output window."
)
def pipe_reader(pipe, out_data, out_flag):
while pipe:
data = pipe.read(1)
if data == '':
if data == "":
break
out_data['out'] += data
print(data, end='', file=out_flag, flush=True)
out_data["out"] += data
print(data, end="", file=out_flag, flush=True)
# Equivalent of CommandRun in Python, which executes subprocesses
@@ -40,21 +42,25 @@ class CommandRunner:
self.process = None
self._model_name = model_name
def run_command(self,
command_name: str,
command: Command,
history_messages: List[Dict],
input_text: str,
parent_hash: str):
def run_command(
self,
command_name: str,
command: Command,
history_messages: List[Dict],
input_text: str,
parent_hash: str,
):
"""
if command has parameters, then generate command parameters from input by LLM
if command.input is "required", and input is null, then return error
"""
input_text = input_text.strip()\
.replace(f'/{command_name}', '')\
.replace('\"', '\\"')\
.replace('\'', '\\\'')\
.replace('\n', '\\n')
input_text = (
input_text.strip()
.replace(f"/{command_name}", "")
.replace('"', '\\"')
.replace("'", "\\'")
.replace("\n", "\\n")
)
arguments = {}
if command.parameters and len(command.parameters) > 0:
@@ -69,20 +75,19 @@ class CommandRunner:
return self.run_command_with_parameters(
command_name=command_name,
command=command,
parameters={
"input": input_text,
**arguments
},
parameters={"input": input_text, **arguments},
parent_hash=parent_hash,
history_messages=history_messages
history_messages=history_messages,
)
def run_command_with_parameters(self,
command_name: str,
command: Command,
parameters: Dict[str, str],
parent_hash: str,
history_messages: List[Dict]):
def run_command_with_parameters(
self,
command_name: str,
command: Command,
parameters: Dict[str, str],
parent_hash: str,
history_messages: List[Dict],
):
"""
replace $xxx in command.steps[0].run with parameters[xxx]
then run command.steps[0].run
@@ -91,17 +96,13 @@ class CommandRunner:
try:
env = os.environ.copy()
env.update(parameters)
env.update(
self.__load_command_runtime(command)
)
env.update(
self.__load_chat_data(self._model_name, parent_hash, history_messages)
)
env.update(self.__load_command_runtime(command))
env.update(self.__load_chat_data(self._model_name, parent_hash, history_messages))
self.__update_devchat_python_path(env, command.steps[0]["run"])
command_run = command.steps[0]["run"]
for parameter in env:
command_run = command_run.replace('$' + parameter, str(env[parameter]))
command_run = command_run.replace("$" + parameter, str(env[parameter]))
if self.__check_command_python_error(command_run, env):
return result
@@ -124,14 +125,15 @@ class CommandRunner:
"""
run command string
"""
def handle_output(process):
stdout_data, stderr_data = {'out': ''}, {'out': ''}
stdout_data, stderr_data = {"out": ""}, {"out": ""}
stdout_thread = threading.Thread(
target=pipe_reader,
args=(process.stdout, stdout_data, sys.stdout))
target=pipe_reader, args=(process.stdout, stdout_data, sys.stdout)
)
stderr_thread = threading.Thread(
target=pipe_reader,
args=(process.stderr, stderr_data, sys.stderr))
target=pipe_reader, args=(process.stderr, stderr_data, sys.stderr)
)
stdout_thread.start()
stderr_thread.start()
stdout_thread.join()
@@ -142,17 +144,17 @@ class CommandRunner:
if isinstance(env[key], (List, Dict)):
env[key] = json.dumps(env[key])
with subprocess.Popen(
shlex.split(command_str),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
text=True
) as process:
shlex.split(command_str),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
text=True,
) as process:
return handle_output(process)
def __check_command_python_error(self, command_run: str, parameters: Dict[str, str]):
need_command_python = command_run.find('$command_python ') != -1
has_command_python = parameters.get('command_python', None)
need_command_python = command_run.find("$command_python ") != -1
has_command_python = parameters.get("command_python", None)
if need_command_python and not has_command_python:
print(DEVCHAT_COMMAND_MISS_ERROR_MESSAGE, file=sys.stderr, flush=True)
@ -162,9 +164,9 @@ class CommandRunner:
def __get_readme(self, command: Command):
try:
command_dir = os.path.dirname(command.path)
readme_file = os.path.join(command_dir, 'README.md')
readme_file = os.path.join(command_dir, "README.md")
if os.path.exists(readme_file):
with open(readme_file, 'r', encoding='utf8') as file:
with open(readme_file, "r", encoding="utf8") as file:
readme = file.read()
return readme
return None
@ -172,8 +174,8 @@ class CommandRunner:
return None
def __check_input_miss_error(
self, command: Command, command_name: str, parameters: Dict[str, str]
):
self, command: Command, command_name: str, parameters: Dict[str, str]
):
is_input_required = command.input == "required"
if not (is_input_required and parameters["input"] == ""):
return False
@ -197,7 +199,7 @@ class CommandRunner:
missed_parameters = []
for parameter_name in parameter_names:
if command_run.find('$' + parameter_name) != -1:
if command_run.find("$" + parameter_name) != -1:
missed_parameters.append(parameter_name)
if len(missed_parameters) == 0:
@ -216,22 +218,21 @@ class CommandRunner:
# visit each prefix path of command_path, for example: /usr/x1/x2/x3
# visits and loads runtime.json from: /usr, /usr/x1, /usr/x1/x2, /usr/x1/x2/x3
paths = command_path.split('/')
for index in range(1, len(paths)+1):
paths = command_path.split("/")
for index in range(1, len(paths) + 1):
try:
path = '/'.join(paths[:index])
runtime_file = os.path.join(path, 'runtime.json')
path = "/".join(paths[:index])
runtime_file = os.path.join(path, "runtime.json")
if os.path.exists(runtime_file):
with open(runtime_file, 'r', encoding='utf8') as file:
with open(runtime_file, "r", encoding="utf8") as file:
command_runtime_config = json.loads(file.read())
runtime_config.update(command_runtime_config)
except Exception:
pass
# for windows
if runtime_config.get('command_python', None):
runtime_config['command_python'] = \
runtime_config['command_python'].replace('\\', '/')
if runtime_config.get("command_python", None):
runtime_config["command_python"] = runtime_config["command_python"].replace("\\", "/")
return runtime_config
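Read on its own, the ancestor walk above reduces to the following runnable sketch of the same logic:

import json
import os

def load_runtime_config(command_path: str) -> dict:
    # Merge runtime.json from every prefix of the path, shallow to deep,
    # so more specific directories override more general ones.
    config = {}
    parts = command_path.split("/")
    for index in range(1, len(parts) + 1):
        runtime_file = os.path.join("/".join(parts[:index]), "runtime.json")
        if os.path.exists(runtime_file):
            with open(runtime_file, "r", encoding="utf8") as file:
                config.update(json.load(file))
    return config

print(load_runtime_config("/usr/x1/x2/x3"))  # {} unless runtime.json files exist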
def __load_chat_data(self, model_name: str, parent_hash: str, history_messages: List[Dict]):
@ -242,16 +243,15 @@ class CommandRunner:
}
def __update_devchat_python_path(self, env: Dict[str, str], command_run: str):
python_path = os.environ.get('PYTHONPATH', '')
env['DEVCHAT_PYTHONPATH'] = os.environ.get('DEVCHAT_PYTHONPATH', python_path)
if command_run.find('$devchat_python ') == -1:
del env['PYTHONPATH']
env["devchat_python"] = sys.executable.replace('\\', '/')
python_path = os.environ.get("PYTHONPATH", "")
env["DEVCHAT_PYTHONPATH"] = os.environ.get("DEVCHAT_PYTHONPATH", python_path)
if command_run.find("$devchat_python ") == -1:
del env["PYTHONPATH"]
env["devchat_python"] = sys.executable.replace("\\", "/")
def _call_function_by_llm(self,
command_name: str,
command: Command,
history_messages: List[Dict]):
def _call_function_by_llm(
self, command_name: str, command: Command, history_messages: List[Dict]
):
"""
command needs multi parameters, so we need parse each
parameter by LLM from input_text

View File

@ -1,17 +1,16 @@
import os
from typing import List, Optional
import re
from typing import List, Optional
class Namespace:
def __init__(self, root_path: str,
branches: List[str] = None):
def __init__(self, root_path: str, branches: List[str] = None):
"""
:param root_path: The root path of the namespace.
:param branches: The hidden branches, in ascending order of priority.
"""
self.root_path = root_path
self.branches = branches if branches else ['sys', 'org', 'usr']
self.branches = branches if branches else ["sys", "org", "usr"]
@staticmethod
def is_valid_name(name: str) -> bool:
@ -28,7 +27,7 @@ class Namespace:
# The regular expression pattern for a valid name
if name is None:
return False
pattern = r'^$|^(?!.*\.\.)[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)*$'
pattern = r"^$|^(?!.*\.\.)[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)*$"
return bool(re.match(pattern, name))
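To make the pattern concrete, a quick check of a few candidate names: the empty string is explicitly allowed by the ^$ alternative, and consecutive dots are rejected by the negative lookahead:

import re

pattern = r"^$|^(?!.*\.\.)[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)*$"
for name in ["", "unit_tests.python", "a..b", "bad/name"]:
    print(repr(name), bool(re.match(pattern, name)))
# '' True, 'unit_tests.python' True, 'a..b' False, 'bad/name' False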
def get_file(self, name: str, file: str) -> Optional[str]:
@ -40,7 +39,7 @@ class Namespace:
if not self.is_valid_name(name):
return None
# Convert the dot-separated name to a path
path = os.path.join(*name.split('.'))
path = os.path.join(*name.split("."))
for branch in reversed(self.branches):
full_path = os.path.join(self.root_path, branch, path)
if os.path.isdir(full_path):
@ -60,7 +59,7 @@ class Namespace:
if not self.is_valid_name(name):
raise ValueError(f"Invalid name to list files: {name}")
# Convert the dot-separated name to a path
path = os.path.join(*name.split('.'))
path = os.path.join(*name.split("."))
files = {}
path_found = False
for branch in self.branches:
@ -77,7 +76,7 @@ class Namespace:
# Sort the files in alphabetical order before returning
return sorted(files.values()) if files else []
def list_names(self, name: str = '', recursive: bool = False) -> List[str]:
def list_names(self, name: str = "", recursive: bool = False) -> List[str]:
"""
:param name: The command name in the namespace. Defaults to the root.
:param recursive: Whether to list all descendant names or only child names.
@ -86,7 +85,7 @@ class Namespace:
if not self.is_valid_name(name):
raise ValueError(f"Invalid name to list names: {name}")
commands = set()
path = os.path.join(*name.split('.'))
path = os.path.join(*name.split("."))
found = False
for branch in self.branches:
full_path = os.path.join(self.root_path, branch, path)
@ -101,10 +100,10 @@ class Namespace:
def _add_dirnames_to_commands(self, full_path: str, name: str, commands: set):
for dirname in os.listdir(full_path):
if dirname.startswith('.'):
if dirname.startswith("."):
continue
if os.path.isdir(os.path.join(full_path, dirname)):
command_name = '.'.join([name, dirname]) if name else dirname
command_name = ".".join([name, dirname]) if name else dirname
commands.add(command_name)
def _add_recursive_dirnames_to_commands(self, full_path: str, name: str, commands: set):
@ -112,10 +111,10 @@ class Namespace:
def _recursive_dir_walk(self, full_path: str, name: str, commands: set):
for dirname in os.listdir(full_path):
if dirname.startswith('.'):
if dirname.startswith("."):
continue
dir_path = os.path.join(full_path, dirname)
if os.path.isdir(dir_path):
command_name = '.'.join([name, dirname]) if name else dirname
command_name = ".".join([name, dirname]) if name else dirname
commands.add(command_name)
self._recursive_dir_walk(dir_path, command_name, commands)
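To see the branch shadowing in action, a self-contained sketch that rebuilds the get_file search order over a throwaway directory tree (the file names here are hypothetical):

import os
import tempfile

branches = ["sys", "org", "usr"]  # default order, ascending priority
root = tempfile.mkdtemp()
for branch in ("sys", "usr"):
    command_dir = os.path.join(root, branch, "code")
    os.makedirs(command_dir)
    with open(os.path.join(command_dir, "command.yml"), "w", encoding="utf-8") as file:
        file.write(f"from {branch}\n")

# Reimplementation of the get_file scan: the highest-priority branch wins.
for branch in reversed(branches):
    candidate = os.path.join(root, branch, "code", "command.yml")
    if os.path.isfile(candidate):
        with open(candidate, "r", encoding="utf-8") as file:
            print(file.read().strip())  # -> from usr
        break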

View File

@ -1,5 +1,6 @@
import re
import os
import re
from .namespace import Namespace
@ -8,30 +9,30 @@ class RecursivePrompter:
self.namespace = namespace
def run(self, name: str) -> str:
ancestors = name.split('.')
merged_content = ''
ancestors = name.split(".")
merged_content = ""
for index in range(len(ancestors)):
ancestor_name = '.'.join(ancestors[:index + 1])
file_path = self.namespace.get_file(ancestor_name, 'prompt.txt')
ancestor_name = ".".join(ancestors[: index + 1])
file_path = self.namespace.get_file(ancestor_name, "prompt.txt")
if file_path:
with open(file_path, 'r', encoding='utf-8') as file:
with open(file_path, "r", encoding="utf-8") as file:
prompt_content = file.read()
# replace @file@ with the content of the file
prompt_content = self._replace_file_references(file_path, prompt_content)
merged_content += prompt_content
merged_content += '\n'
merged_content += "\n"
return merged_content
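The ancestor walk in run() expands a dotted name into every prefix, so a parent workflow's prompt.txt always precedes its child's; a tiny illustration:

name = "unit_tests.python"
ancestors = name.split(".")
print([".".join(ancestors[: index + 1]) for index in range(len(ancestors))])
# ['unit_tests', 'unit_tests.python']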
def _replace_file_references(self, prompt_file_path: str, content: str) -> str:
# prompt_file_path is the path of the prompt file that contains the content
# @relative file path@: the reference is resolved relative to prompt_file_path
pattern = re.compile(r'@(.+?)@')
pattern = re.compile(r"@(.+?)@")
matches = pattern.findall(content)
for match in matches:
file_path = os.path.join(os.path.dirname(prompt_file_path), match)
if os.path.exists(file_path):
with open(file_path, 'r', encoding='utf-8') as file:
with open(file_path, "r", encoding="utf-8") as file:
file_content = file.read()
content = content.replace(f'@{match}@', file_content)
content = content.replace(f"@{match}@", file_content)
return content
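Reassembled from the hunk above, the whole reference pass fits in a few lines (a sketch; the file paths are hypothetical):

import os
import re

def replace_file_references(prompt_file_path: str, content: str) -> str:
    # Inline each @path@ reference; paths resolve relative to the prompt file.
    for match in re.findall(r"@(.+?)@", content):
        file_path = os.path.join(os.path.dirname(prompt_file_path), match)
        if os.path.exists(file_path):
            with open(file_path, "r", encoding="utf-8") as file:
                content = content.replace(f"@{match}@", file.read())
    return content

print(replace_file_references("/tmp/prompt.txt", "see @notes.md@"))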

View File

@ -1,18 +1,20 @@
import os
from typing import List
from .command_runner import CommandRunner
from .util import CommandUtil
from .namespace import Namespace
from.recursive_prompter import RecursivePrompter
from .recursive_prompter import RecursivePrompter
from .util import CommandUtil
def load_workflow_instruction(user_input: str):
user_input = user_input.strip()
if len(user_input) == 0:
return None
if user_input[:1] != '/':
if user_input[:1] != "/":
return None
workflows_dir = os.path.join(os.path.expanduser('~/.chat'), 'workflows')
workflows_dir = os.path.join(os.path.expanduser("~/.chat"), "workflows")
if not os.path.exists(workflows_dir):
return None
if not os.path.isdir(workflows_dir):
@ -28,19 +30,16 @@ def load_workflow_instruction(user_input: str):
def run_command(
model_name: str,
history_messages: List[dict],
input_text: str,
parent_hash: str,
auto_fun: bool):
model_name: str, history_messages: List[dict], input_text: str, parent_hash: str, auto_fun: bool
):
"""
load command config, and then run Command
"""
# split input_text by ' ','\n','\t'
if len(input_text.strip()) == 0:
return None
if input_text.strip()[:1] != '/':
if not (auto_fun and model_name.startswith('gpt-')):
if input_text.strip()[:1] != "/":
if not (auto_fun and model_name.startswith("gpt-")):
return None
# TODO
@ -60,5 +59,5 @@ def run_command(
command=command_obj,
history_messages=history_messages,
input_text=input_text,
parent_hash=parent_hash
parent_hash=parent_hash,
)

View File

@ -1,26 +1,25 @@
# pylint: disable=import-outside-toplevel
import json
import os
import sys
import json
from typing import List, Dict
from typing import Dict, List
from devchat._cli.utils import init_dir
from devchat.utils import get_logger
from .command_parser import Command, CommandParser
from .namespace import Namespace
from .command_parser import CommandParser, Command
logger = get_logger(__name__)
DEFAULT_MODEL = "gpt-3.5-turbo"
class CommandUtil:
@staticmethod
def __command_parser():
_, user_chat_dir = init_dir()
workflows_dir = os.path.join(user_chat_dir, 'workflows')
workflows_dir = os.path.join(user_chat_dir, "workflows")
if not os.path.exists(workflows_dir) or not os.path.isdir(workflows_dir):
return None
@ -42,8 +41,8 @@ class CommandUtil:
return []
command_names = commander.namespace.list_names("", True)
commands = [ (name, commander.parse(name)) for name in command_names ]
return [ cmd for cmd in commands if cmd[1] ]
commands = [(name, commander.parse(name)) for name in command_names]
return [cmd for cmd in commands if cmd[1]]
class ToolUtil:
@ -56,23 +55,20 @@ class ToolUtil:
for key, value in command.parameters.items():
properties[key] = {}
for key1, value1 in value.dict().items():
if key1 not in ['type', 'description', 'enum'] or value1 is None:
if key1 not in ["type", "description", "enum"] or value1 is None:
continue
properties[key][key1] = value1
required.append(key)
elif command.steps[0]['run'].find('$input') > 0:
properties['input'] = {
"type": "string",
"description": "input text"
}
required.append('input')
elif command.steps[0]["run"].find("$input") > 0:
properties["input"] = {"type": "string", "description": "input text"}
required.append("input")
return properties, required
@staticmethod
def make_function(command: Command, command_name: str):
properties, required = ToolUtil.__make_function_parameters(command)
command_name = command_name.replace('.', '---')
command_name = command_name.replace(".", "---")
return {
"type": "function",
@ -84,37 +80,37 @@ class ToolUtil:
"properties": properties,
"required": required,
},
}
},
}
@staticmethod
def select_function_by_llm(
history_messages: List[Dict], tools: List[Dict], model: str = DEFAULT_MODEL
):
import openai
history_messages: List[Dict], tools: List[Dict], model: str = DEFAULT_MODEL
):
import httpx
import openai
proxy_url = os.environ.get("DEVCHAT_PROXY", "")
proxy_setting ={"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
proxy_setting = (
{"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
)
client = openai.OpenAI(
api_key=os.environ.get("OPENAI_API_KEY", None),
base_url=os.environ.get("OPENAI_API_BASE", None),
http_client=httpx.Client(**proxy_setting, trust_env=False)
http_client=httpx.Client(**proxy_setting, trust_env=False),
)
try:
response = client.chat.completions.create(
messages=history_messages,
model=model,
stream=False,
tools=tools
messages=history_messages, model=model, stream=False, tools=tools
)
response_message = response.dict()["choices"][0]["message"]
if not response_message['tool_calls']:
if not response_message["tool_calls"]:
return None
tool_call = response_message['tool_calls'][0]['function']
if tool_call['name'] != tools[0]["function"]["name"]:
tool_call = response_message["tool_calls"][0]["function"]
if tool_call["name"] != tools[0]["function"]["name"]:
error_msg = (
"The LLM returned an invalid function name. "
f"Expected: {tools[0]['function']['name']}, "
@ -123,8 +119,8 @@ class ToolUtil:
print(error_msg, file=sys.stderr, flush=True)
return None
return {
"name": tool_call['name'].replace('---', '.'),
"arguments": json.loads(tool_call['arguments'])
"name": tool_call["name"].replace("---", "."),
"arguments": json.loads(tool_call["arguments"]),
}
except (ConnectionError, openai.APIConnectionError) as err:
print("ConnectionError:", err, file=sys.stderr, flush=True)
@ -139,25 +135,22 @@ class ToolUtil:
return None
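The '---' mangling above exists, presumably, because OpenAI function names may only contain letters, digits, underscores, and hyphens, so dotted workflow names must be encoded reversibly; a round-trip check:

def to_function_name(command_name: str) -> str:
    # '.' is not a legal character in a function name, so '.' <-> '---'.
    return command_name.replace(".", "---")

def from_function_name(function_name: str) -> str:
    return function_name.replace("---", ".")

name = "unit_tests.python"
assert from_function_name(to_function_name(name)) == name
print(to_function_name(name))  # unit_tests---python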
@staticmethod
def _create_tool(command_name:str, command: Command) -> dict:
def _create_tool(command_name: str, command: Command) -> dict:
properties = {}
required = []
if command.parameters:
for key, value in command.parameters.items():
properties[key] = {}
for key1, value1 in value.dict().items():
if key1 not in ['type', 'description', 'enum'] or value1 is None:
if key1 not in ["type", "description", "enum"] or value1 is None:
continue
properties[key][key1] = value1
required.append(key)
elif command.steps[0]['run'].find('$input') > 0:
properties['input'] = {
"type": "string",
"description": "input text"
}
required.append('input')
elif command.steps[0]["run"].find("$input") > 0:
properties["input"] = {"type": "string", "description": "input text"}
required.append("input")
command_name = command_name.replace('.', '---')
command_name = command_name.replace(".", "---")
return {
"type": "function",
"function": {
@ -168,6 +161,5 @@ class ToolUtil:
"properties": properties,
"required": required,
},
}
},
}

View File

@ -1,5 +1,5 @@
from .service import IDEService
from .types import *
from .types import * # noqa: F403
from .types import __all__ as types_all
__all__ = types_all + [

View File

@ -1,5 +1,6 @@
from .types import LocationWithText
from .rpc import rpc_method
from .types import LocationWithText
class IdeaIDEService:
def __init__(self):
@ -7,8 +8,8 @@ class IdeaIDEService:
@rpc_method
def get_visible_range(self) -> LocationWithText:
return LocationWithText.parse_obj(self._result)
return LocationWithText.parse_obj(self._result)
@rpc_method
def get_selected_range(self) -> LocationWithText:
return LocationWithText.parse_obj(self._result)
return LocationWithText.parse_obj(self._result)

View File

@ -1,8 +1,3 @@
# pylint: disable=C0103
# pylint: disable=W3101
# pylint: disable=W0719
# pylint: disable=R1710
# pylint: disable=W0212
import os
from functools import wraps

View File

@ -1,17 +1,9 @@
# disable pylint
# pylint: disable=W0613
# pylint: disable=E1133
# pylint: disable=R1710
# pylint: disable=W0719
# pylint: disable=W3101
# pylint: disable=C0103
from typing import List
from .rpc import rpc_method
from .types import Location, SymbolNode, LocationWithText
from .vscode_services import selected_range, visible_range
from .idea_services import IdeaIDEService
from .rpc import rpc_method
from .types import Location, LocationWithText, SymbolNode
from .vscode_services import selected_range, visible_range
class IDEService:
@ -39,7 +31,7 @@ class IDEService:
@rpc_method
def install_python_env(self, command_name: str, requirements_file: str) -> str:
"""
A method to install a Python environment with the provided command name
A method to install a Python environment with the provided command name
and requirements file, returning the path of the installed Python.
Command name is the name of the environment to be installed.
"""
@ -136,7 +128,7 @@ class IDEService:
Determines and returns the visible range of code in the current IDE.
Returns:
A tuple denoting the visible range if the IDE is VSCode, or defers to
A tuple denoting the visible range if the IDE is VSCode, or defers to
IdeaIDEService's get_visible_range method for other IDEs.
"""
if self.ide_name() == "vscode":

View File

@ -2,13 +2,7 @@ from typing import List
from pydantic import BaseModel
__all__ = [
"Position",
"Range",
"Location",
"SymbolNode",
"LocationWithText"
]
__all__ = ["Position", "Range", "Location", "SymbolNode", "LocationWithText"]
class Position(BaseModel):

View File

@ -5,12 +5,12 @@ from .types import LocationWithText
@rpc_call
def run_code(code: str): # pylint: disable=unused-argument
def run_code(code: str):
pass
@rpc_call
def diff_apply(filepath, content): # pylint: disable=unused-argument
def diff_apply(filepath, content):
pass
@ -110,6 +110,7 @@ def visible_lines():
"visibleRange": [start_line, end_line],
}
def visible_range() -> LocationWithText:
visible_range_text = visible_lines()
return LocationWithText(
@ -124,7 +125,7 @@ def visible_range() -> LocationWithText:
"line": visible_range_text["visibleRange"][1],
"character": 0,
},
}
},
)
@ -159,6 +160,7 @@ def selected_lines():
"selectedRange": [start_line, start_col, end_line, end_col],
}
def selected_range() -> LocationWithText:
selected_range_text = selected_lines()
return LocationWithText(
@ -173,5 +175,5 @@ def selected_range() -> LocationWithText:
"line": selected_range_text["selectedRange"][2],
"character": selected_range_text["selectedRange"][3],
},
}
},
)

View File

@ -47,7 +47,7 @@ def chat(
):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs): # pylint: disable=unused-argument
def wrapper(*args, **kwargs):
nonlocal prompt, memory, model, llm_config
prompt_new = prompt.format(**kwargs)
messages = memory.contexts() if memory else []
@ -86,7 +86,7 @@ def chat_json(
):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs): # pylint: disable=unused-argument
def wrapper(*args, **kwargs):
nonlocal prompt, memory, model, llm_config
prompt_new = prompt.format(**kwargs)
messages = memory.contexts() if memory else []

View File

@ -41,12 +41,12 @@ def chat_completion_stream_commit(
llm_config: Dict, # {"model": "...", ...}
):
proxy_url = os.environ.get("DEVCHAT_PROXY", "")
proxy_setting ={"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
proxy_setting = {"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
client = openai.OpenAI(
api_key=os.environ.get("OPENAI_API_KEY", None),
base_url=os.environ.get("OPENAI_API_BASE", None),
http_client=httpx.Client(**proxy_setting, trust_env=False)
http_client=httpx.Client(**proxy_setting, trust_env=False),
)
llm_config["stream"] = True
@ -56,12 +56,12 @@ def chat_completion_stream_commit(
def chat_completion_stream_raw(**kwargs):
proxy_url = os.environ.get("DEVCHAT_PROXY", "")
proxy_setting ={"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
proxy_setting = {"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
client = openai.OpenAI(
api_key=os.environ.get("OPENAI_API_KEY", None),
base_url=os.environ.get("OPENAI_API_BASE", None),
http_client=httpx.Client(**proxy_setting, trust_env=False)
http_client=httpx.Client(**proxy_setting, trust_env=False),
)
kwargs["stream"] = True
@ -87,7 +87,7 @@ def retry_timeout(chunks):
def chunk_list(chunks):
return [chunk for chunk in chunks] # pylint: disable=R1721
return [chunk for chunk in chunks]
def chunks_content(chunks):
@ -164,13 +164,13 @@ chat_completion_no_stream_return_json_with_retry = exception_handle(
exception_output_handle(lambda err: None),
)
def chat_completion_no_stream_return_json(
messages: List[Dict], llm_config: Dict):
def chat_completion_no_stream_return_json(messages: List[Dict], llm_config: Dict):
"""call llm without stream, return json object"""
llm_config["response_format"]={"type": "json_object"}
llm_config["response_format"] = {"type": "json_object"}
return chat_completion_no_stream_return_json_with_retry(
messages=messages,
llm_config=llm_config)
messages=messages, llm_config=llm_config
)
chat_completion_stream = exception_handle(

View File

@ -19,8 +19,7 @@ def retry(func, times):
raise err.error
continue
except Exception as err:
raise err
raise err.error
raise err.error
return wrapper
@ -62,9 +61,8 @@ def pipeline(*funcs):
def wrapper(*args, **kwargs):
for index, func in enumerate(funcs):
if index > 0:
# pylint: disable=E1101
if isinstance(args, Dict) and args.get("__type__", None) == "parallel":
args = func(*args["value"]) # pylint: disable=E1126
args = func(*args["value"])
else:
args = func(args)
else:

View File

@ -3,9 +3,9 @@ import os
import sys
from functools import wraps
from devchat.memory import ChatMemory
from devchat.chatmark import Form, Radio, TextEditor
from devchat.ide import IDEService
from devchat.memory import ChatMemory
from .openai import chat_call_completion_stream
@ -140,7 +140,7 @@ def chat_tools(
):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs): # pylint: disable=unused-argument
def wrapper(*args, **kwargs):
nonlocal prompt, memory, model, tools, call_confirm_fun, llm_config
prompt = prompt.format(**kwargs)
if not tools:

View File

@ -1,4 +1,3 @@
from .base import ChatMemory
from .fixsize_memory import FixSizeChatMemory

View File

@ -7,6 +7,7 @@ class Message(ABC):
"""
The basic unit of information in a prompt.
"""
content: str = ""
INSTRUCT = "instruct"
@ -22,7 +23,7 @@ class Message(ABC):
@classmethod
@abstractmethod
def from_dict(cls, message_data: dict) -> 'Message':
def from_dict(cls, message_data: dict) -> "Message":
"""
Convert the message from a dictionary.
"""

View File

@ -1,11 +1,11 @@
from .openai_chat import OpenAIChatParameters, OpenAIChatConfig, OpenAIChat
from .openai_chat import OpenAIChat, OpenAIChatConfig, OpenAIChatParameters
from .openai_message import OpenAIMessage
from .openai_prompt import OpenAIPrompt
__all__ = [
'OpenAIChat',
'OpenAIChatConfig',
'OpenAIChatParameters',
'OpenAIMessage',
'OpenAIPrompt'
"OpenAIChat",
"OpenAIChatConfig",
"OpenAIChatParameters",
"OpenAIMessage",
"OpenAIPrompt",
]

View File

@ -4,6 +4,7 @@ import os
import sys
from urllib.parse import urlparse
class LineReader:
def __init__(self, response):
self.response = response
@ -18,7 +19,7 @@ class LineReader:
line = line.strip()
if not line:
return self.__next__()
line = line.decode('utf-8')
line = line.decode("utf-8")
if not line.startswith("data:"):
print("Receive invalid line: {line}", end="\n\n", file=sys.stderr)
raise ValueError(f"Invalid line: {line}")
@ -31,13 +32,17 @@ class LineReader:
print(f"Error decoding JSON: {err}", end="\n\n", file=sys.stderr)
raise ValueError(f"Invalid line: {line}") from err
def stream_response(connection: http.client.HTTPSConnection, data, headers):
connection.request("POST", "/v1/chat/completions", body=json.dumps(data), headers=headers)
response = connection.getresponse()
if response.status != 200:
print(f"Error: {response.status} - {response.reason} {response.read()}",
end="\n\n", file=sys.stderr)
print(
f"Error: {response.status} - {response.reason} {response.read()}",
end="\n\n",
file=sys.stderr,
)
return None
return LineReader(response=response)
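LineReader is stripping standard server-sent-events framing; what one streamed chunk looks like before and after (illustrative payload):

import json

raw = b'data: {"choices": [{"delta": {"content": "Hi"}}]}\n'
line = raw.strip().decode("utf-8")
assert line.startswith("data:")
payload = json.loads(line[len("data:"):].strip())
print(payload["choices"][0]["delta"]["content"])  # Hi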

View File

@ -1,16 +1,18 @@
# pylint: disable=import-outside-toplevel
import json
import os
from typing import Optional, Union, List, Dict, Iterator
from typing import Dict, Iterator, List, Optional, Union
from pydantic import BaseModel, Field
from devchat.chat import Chat
from devchat.utils import get_user_info, user_id
from .http_openai import stream_request
from .openai_message import OpenAIMessage
from .openai_prompt import OpenAIPrompt
from .http_openai import stream_request
class OpenAIChatParameters(BaseModel, extra='ignore'):
class OpenAIChatParameters(BaseModel, extra="ignore"):
temperature: Optional[float] = Field(0, ge=0, le=2)
top_p: Optional[float] = Field(None, ge=0, le=1)
n: Optional[int] = Field(None, ge=1)
@ -28,6 +30,7 @@ class OpenAIChatConfig(OpenAIChatParameters):
"""
Configuration object for the OpenAIChat APIs.
"""
model: str
@ -35,6 +38,7 @@ class OpenAIChat(Chat):
"""
OpenAIChat class that handles communication with the OpenAI Chat API.
"""
def __init__(self, config: OpenAIChatConfig):
"""
Initialize the OpenAIChat class with a configuration object.
@ -52,83 +56,81 @@ class OpenAIChat(Chat):
return prompt
def load_prompt(self, data: dict) -> OpenAIPrompt:
data['_new_messages'] = {
data["_new_messages"] = {
k: [OpenAIMessage.from_dict(m) for m in v]
if isinstance(v, list) else OpenAIMessage.from_dict(v)
for k, v in data['_new_messages'].items() if k != 'function'
if isinstance(v, list)
else OpenAIMessage.from_dict(v)
for k, v in data["_new_messages"].items()
if k != "function"
}
data["_history_messages"] = {
k: [OpenAIMessage.from_dict(m) for m in v] for k, v in data["_history_messages"].items()
}
data['_history_messages'] = {k: [OpenAIMessage.from_dict(m) for m in v]
for k, v in data['_history_messages'].items()}
return OpenAIPrompt(**data)
def complete_response(self, prompt: OpenAIPrompt) -> str:
import openai
import httpx
import openai
# Filter the config parameters with set values
config_params = self.config.dict(exclude_unset=True)
if prompt.get_functions():
config_params['functions'] = prompt.get_functions()
config_params['function_call'] = 'auto'
config_params['stream'] = False
config_params["functions"] = prompt.get_functions()
config_params["function_call"] = "auto"
config_params["stream"] = False
proxy_url = os.environ.get("DEVCHAT_PROXY", "")
proxy_setting ={"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
proxy_setting = (
{"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
)
client = openai.OpenAI(
api_key=os.environ.get("OPENAI_API_KEY", None),
base_url=os.environ.get("OPENAI_API_BASE", None),
http_client=httpx.Client(**proxy_setting, trust_env=False)
http_client=httpx.Client(**proxy_setting, trust_env=False),
)
response = client.chat.completions.create(
messages=prompt.messages,
**config_params
)
response = client.chat.completions.create(messages=prompt.messages, **config_params)
if isinstance(response, openai.types.chat.chat_completion.ChatCompletion):
return json.dumps(response.dict())
return str(response)
def stream_response(self, prompt: OpenAIPrompt) -> Iterator:
api_key=os.environ.get("OPENAI_API_KEY", None)
base_url=os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1/")
api_key = os.environ.get("OPENAI_API_KEY", None)
base_url = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1/")
if not os.environ.get("USE_TIKTOKEN", False) and base_url != "https://api.openai.com/v1/":
config_params = self.config.dict(exclude_unset=True)
if prompt.get_functions():
config_params['functions'] = prompt.get_functions()
config_params['function_call'] = 'auto'
config_params['stream'] = True
config_params["functions"] = prompt.get_functions()
config_params["function_call"] = "auto"
config_params["stream"] = True
data = {
"messages":prompt.messages,
**config_params,
"timeout":180
}
data = {"messages": prompt.messages, **config_params, "timeout": 180}
response = stream_request(api_key, base_url, data)
return response
import openai
import httpx
import openai
# Filter the config parameters with set values
config_params = self.config.dict(exclude_unset=True)
if prompt.get_functions():
config_params['functions'] = prompt.get_functions()
config_params['function_call'] = 'auto'
config_params['stream'] = True
config_params["functions"] = prompt.get_functions()
config_params["function_call"] = "auto"
config_params["stream"] = True
proxy_url = os.environ.get("DEVCHAT_PROXY", "")
proxy_setting ={"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
proxy_setting = (
{"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
)
client = openai.OpenAI(
api_key=os.environ.get("OPENAI_API_KEY", None),
base_url=os.environ.get("OPENAI_API_BASE", None),
http_client=httpx.Client(**proxy_setting, trust_env=False)
http_client=httpx.Client(**proxy_setting, trust_env=False),
)
response = client.chat.completions.create(
messages=prompt.messages,
**config_params,
timeout=180
messages=prompt.messages, **config_params, timeout=180
)
return response

View File

@ -1,6 +1,6 @@
import ast
import json
from dataclasses import dataclass, asdict, field, fields
from dataclasses import asdict, dataclass, field, fields
from typing import Dict, Optional
from devchat.message import Message
@ -17,19 +17,21 @@ class OpenAIMessage(Message):
raise ValueError("Invalid role. Must be one of 'system', 'user', or 'assistant'.")
if not self._validate_name():
raise ValueError("Invalid name. Must contain a-z, A-Z, 0-9, and underscores, "
"with a maximum length of 64 characters.")
raise ValueError(
"Invalid name. Must contain a-z, A-Z, 0-9, and underscores, "
"with a maximum length of 64 characters."
)
def to_dict(self) -> dict:
state = asdict(self)
if state['name'] is None:
del state['name']
if not state['function_call'] or len(state['function_call'].keys()) == 0:
del state['function_call']
if state["name"] is None:
del state["name"]
if not state["function_call"] or len(state["function_call"].keys()) == 0:
del state["function_call"]
return state
@classmethod
def from_dict(cls, message_data: dict) -> 'OpenAIMessage':
def from_dict(cls, message_data: dict) -> "OpenAIMessage":
keys = {f.name for f in fields(cls)}
kwargs = {k: v for k, v in message_data.items() if k in keys}
return cls(**kwargs)
@ -44,24 +46,24 @@ class OpenAIMessage(Message):
}
'''
if not self.function_call:
return ''
return ""
function_call_copy = self.function_call.copy()
if 'arguments' in function_call_copy:
if "arguments" in function_call_copy:
# the arguments field may not be a valid JSON string,
# so try to parse it as a Python literal first
try:
function_call_copy['arguments'] = ast.literal_eval(function_call_copy['arguments'])
function_call_copy["arguments"] = ast.literal_eval(function_call_copy["arguments"])
except Exception:
# not a Python literal; fall back to JSON, else leave it unchanged
try:
function_call_copy['arguments'] = json.loads(function_call_copy['arguments'])
function_call_copy["arguments"] = json.loads(function_call_copy["arguments"])
except Exception:
pass
return '```command\n' + json.dumps(function_call_copy) + '\n```'
return "```command\n" + json.dumps(function_call_copy) + "\n```"
def stream_from_dict(self, message_data: dict) -> str:
"""Append to the message from a dictionary returned from a streaming chat API."""
delta = message_data.get('content', '')
delta = message_data.get("content", "")
if self.content:
self.content += delta
else:

View File

@ -1,11 +1,12 @@
from dataclasses import dataclass
import json
import sys
from dataclasses import dataclass
from typing import List, Optional
from devchat.prompt import Prompt
from devchat.message import Message
from devchat.utils import update_dict, get_logger
from devchat.utils import openai_message_tokens, openai_response_tokens
from devchat.prompt import Prompt
from devchat.utils import get_logger, openai_message_tokens, openai_response_tokens, update_dict
from .openai_message import OpenAIMessage
logger = get_logger(__name__)
@ -31,9 +32,10 @@ class OpenAIPrompt(Prompt):
combined += [msg.to_dict() for msg in self._new_messages[Message.INSTRUCT]]
# History context
if self._history_messages[Message.CONTEXT]:
combined += [update_dict(msg.to_dict(), 'content',
f"<context>\n{msg.content}\n</context>")
for msg in self._history_messages[Message.CONTEXT]]
combined += [
update_dict(msg.to_dict(), "content", f"<context>\n{msg.content}\n</context>")
for msg in self._history_messages[Message.CONTEXT]
]
# History chat
if self._history_messages[Message.CHAT]:
combined += [msg.to_dict() for msg in self._history_messages[Message.CHAT]]
@ -42,9 +44,10 @@ class OpenAIPrompt(Prompt):
combined += [self.request.to_dict()]
# New context
if self.new_context:
combined += [update_dict(msg.to_dict(), 'content',
f"<context>\n{msg.content}\n</context>")
for msg in self.new_context]
combined += [
update_dict(msg.to_dict(), "content", f"<context>\n{msg.content}\n</context>")
for msg in self.new_context
]
return combined
def input_messages(self, messages: List[dict]):
@ -94,12 +97,13 @@ class OpenAIPrompt(Prompt):
continue
self._history_messages[Message.CHAT].append(last_message)
def append_new(self, message_type: str, content: str,
available_tokens: int = sys.maxsize) -> bool:
def append_new(
self, message_type: str, content: str, available_tokens: int = sys.maxsize
) -> bool:
if message_type not in (Message.INSTRUCT, Message.CONTEXT):
raise ValueError(f"Current messages cannot be of type {message_type}.")
# New instructions and context are of the system role
message = OpenAIMessage(content=content, role='system')
message = OpenAIMessage(content=content, role="system")
num_tokens = openai_message_tokens(message.to_dict(), self.model)
if num_tokens > available_tokens:
@ -121,8 +125,9 @@ class OpenAIPrompt(Prompt):
def get_functions(self):
return self._new_messages.get(Message.FUNCTION, None)
def _prepend_history(self, message_type: str, message: Message,
token_limit: int = sys.maxsize) -> bool:
def _prepend_history(
self, message_type: str, message: Message, token_limit: int = sys.maxsize
) -> bool:
if message_type == Message.INSTRUCT:
raise ValueError("History messages cannot be of type INSTRUCT.")
num_tokens = openai_message_tokens(message.to_dict(), self.model)
@ -132,7 +137,7 @@ class OpenAIPrompt(Prompt):
self._request_tokens += num_tokens
return True
def prepend_history(self, prompt: 'OpenAIPrompt', token_limit: int = sys.maxsize) -> bool:
def prepend_history(self, prompt: "OpenAIPrompt", token_limit: int = sys.maxsize) -> bool:
# Prepend the first response and the request of the prompt
if not self._prepend_history(Message.CHAT, prompt.responses[0], token_limit):
return False
@ -148,9 +153,9 @@ class OpenAIPrompt(Prompt):
def set_request(self, content: str, function_name: Optional[str] = None) -> int:
if not content.strip():
raise ValueError("The request cannot be empty.")
message = OpenAIMessage(content=content,
role=('user' if not function_name else 'function'),
name=function_name)
message = OpenAIMessage(
content=content, role=("user" if not function_name else "function"), name=function_name
)
self.request = message
self._request_tokens += openai_message_tokens(message.to_dict(), self.model)
@ -166,17 +171,17 @@ class OpenAIPrompt(Prompt):
self._timestamp_from_dict(response_data)
self._id_from_dict(response_data)
self._request_tokens = response_data['usage']['prompt_tokens']
self._response_tokens = response_data['usage']['completion_tokens']
self._request_tokens = response_data["usage"]["prompt_tokens"]
self._response_tokens = response_data["usage"]["completion_tokens"]
for choice in response_data['choices']:
index = choice['index']
for choice in response_data["choices"]:
index = choice["index"]
if index >= len(self.responses):
self.responses.extend([None] * (index - len(self.responses) + 1))
self._response_reasons.extend([None] * (index - len(self._response_reasons) + 1))
self.responses[index] = OpenAIMessage.from_dict(choice['message'])
if choice['finish_reason']:
self._response_reasons[index] = choice['finish_reason']
self.responses[index] = OpenAIMessage.from_dict(choice["message"])
if choice["finish_reason"]:
self._response_reasons[index] = choice["finish_reason"]
def append_response(self, delta_str: str) -> str:
"""
@ -193,11 +198,11 @@ class OpenAIPrompt(Prompt):
self._timestamp_from_dict(response_data)
self._id_from_dict(response_data)
delta_content = ''
for choice in response_data['choices']:
delta = choice['delta']
index = choice['index']
finish_reason = choice['finish_reason']
delta_content = ""
for choice in response_data["choices"]:
delta = choice["delta"]
index = choice["index"]
finish_reason = choice["finish_reason"]
if index >= len(self.responses):
self.responses.extend([None] * (index - len(self.responses) + 1))
@ -206,22 +211,24 @@ class OpenAIPrompt(Prompt):
if not self.responses[index]:
self.responses[index] = OpenAIMessage.from_dict(delta)
if index == 0:
delta_content = self.responses[0].content if self.responses[0].content else ''
delta_content = self.responses[0].content if self.responses[0].content else ""
else:
if index == 0:
delta_content = self.responses[0].stream_from_dict(delta)
else:
self.responses[index].stream_from_dict(delta)
if 'function_call' in delta:
if 'name' in delta['function_call'] and \
self.responses[index].function_call.get('name', '') == '':
self.responses[index].function_call['name'] = \
delta['function_call']['name']
if 'arguments' in delta['function_call']:
self.responses[index].function_call['arguments'] = \
self.responses[index].function_call.get('arguments', '') + \
delta['function_call']['arguments']
if "function_call" in delta:
if (
"name" in delta["function_call"]
and self.responses[index].function_call.get("name", "") == ""
):
self.responses[index].function_call["name"] = delta["function_call"]["name"]
if "arguments" in delta["function_call"]:
self.responses[index].function_call["arguments"] = (
self.responses[index].function_call.get("arguments", "")
+ delta["function_call"]["arguments"]
)
if finish_reason:
self._response_reasons[index] = finish_reason
@ -231,19 +238,19 @@ class OpenAIPrompt(Prompt):
return sum(openai_response_tokens(resp.to_dict(), self.model) for resp in self.responses)
def _validate_model(self, response_data: dict):
if not response_data['model'].startswith(self.model):
logger.warning("Model mismatch: expected '%s', got '%s'",
self.model, response_data['model'])
if not response_data["model"].startswith(self.model):
logger.warning(
"Model mismatch: expected '%s', got '%s'", self.model, response_data["model"]
)
def _timestamp_from_dict(self, response_data: dict):
if not self._timestamp:
self._timestamp = response_data['created']
elif self._timestamp != response_data['created']:
self._timestamp = response_data['created']
self._timestamp = response_data["created"]
elif self._timestamp != response_data["created"]:
self._timestamp = response_data["created"]
def _id_from_dict(self, response_data: dict):
if self._id is None:
self._id = response_data['id']
elif self._id != response_data['id']:
raise ValueError(f"ID mismatch: expected {self._id}, "
f"got {response_data['id']}")
self._id = response_data["id"]
elif self._id != response_data["id"]:
raise ValueError(f"ID mismatch: expected {self._id}, " f"got {response_data['id']}")

View File

@ -1,12 +1,12 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, field, asdict
import hashlib
from datetime import datetime
import sys
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass, field
from datetime import datetime
from typing import Dict, List
from devchat.message import Message
from devchat.utils import unix_to_local_datetime, get_logger, user_id
from devchat.message import Message
from devchat.utils import get_logger, unix_to_local_datetime, user_id
logger = get_logger(__name__)
@ -33,16 +33,17 @@ class Prompt(ABC):
model: str
user_name: str
user_email: str
_new_messages: Dict = field(default_factory=lambda: {
Message.INSTRUCT: [],
'request': None,
Message.CONTEXT: [],
'responses': []
})
_history_messages: Dict[str, Message] = field(default_factory=lambda: {
Message.CONTEXT: [],
Message.CHAT: []
})
_new_messages: Dict = field(
default_factory=lambda: {
Message.INSTRUCT: [],
"request": None,
Message.CONTEXT: [],
"responses": [],
}
)
_history_messages: Dict[str, Message] = field(
default_factory=lambda: {Message.CONTEXT: [], Message.CHAT: []}
)
parent: str = None
references: List[str] = field(default_factory=list)
_timestamp: int = 0
@ -59,8 +60,9 @@ class Prompt(ABC):
bool: Whether the prompt is complete.
"""
if not self.request or not self.responses:
logger.warning("Incomplete prompt: request = %s, response = %s",
self.request, self.responses)
logger.warning(
"Incomplete prompt: request = %s, response = %s", self.request, self.responses
)
return False
if not self.timestamp:
@ -78,15 +80,15 @@ class Prompt(ABC):
@property
def request(self) -> Message:
return self._new_messages['request']
return self._new_messages["request"]
@request.setter
def request(self, value: Message):
self._new_messages['request'] = value
self._new_messages["request"] = value
@property
def responses(self) -> List[Message]:
return self._new_messages['responses']
return self._new_messages["responses"]
@property
def timestamp(self) -> int:
@ -142,8 +144,9 @@ class Prompt(ABC):
"""
@abstractmethod
def append_new(self, message_type: str, content: str,
available_tokens: int = sys.maxsize) -> bool:
def append_new(
self, message_type: str, content: str, available_tokens: int = sys.maxsize
) -> bool:
"""
Append a new message provided by the user to this prompt.
@ -215,9 +218,9 @@ class Prompt(ABC):
self._response_tokens = self._count_response_tokens()
data = asdict(self)
data.pop('_hash')
data.pop("_hash")
string = str(tuple(sorted(data.items())))
self._hash = hashlib.sha256(string.encode('utf-8')).hexdigest()
self._hash = hashlib.sha256(string.encode("utf-8")).hexdigest()
return self._hash
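The hash above is made order-independent by sorting the dict items before serializing; the same recipe in isolation (the field values are hypothetical):

import hashlib

data = {"model": "gpt-4", "parent": None, "user_name": "alice"}
string = str(tuple(sorted(data.items())))
print(hashlib.sha256(string.encode("utf-8")).hexdigest())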
def formatted_header(self) -> str:
@ -238,12 +241,12 @@ class Prompt(ABC):
note = None
formatted_str = "\n\n"
reason = self._response_reasons[index]
if reason == 'length':
if reason == "length":
note = "Incomplete model output due to max_tokens parameter or token limit"
elif reason == 'function_call':
elif reason == "function_call":
formatted_str += self.responses[index].function_call_to_json() + "\n\n"
note = "The model decided to call a function"
elif reason == 'content_filter':
elif reason == "content_filter":
note = "Omitted content due to a flag from our content filters"
if note:
@ -262,8 +265,12 @@ class Prompt(ABC):
str: The formatted response string. None if the response is invalid.
"""
if index >= len(self.responses) or not self.responses[index]:
logger.error("Response index %d is invalid to format: request = %s, response = %s",
index, self.request, self.responses)
logger.error(
"Response index %d is invalid to format: request = %s, response = %s",
index,
self.request,
self.responses,
)
return None
formatted_str = ""
@ -280,8 +287,9 @@ class Prompt(ABC):
responses = []
for message in self.responses:
responses.append((message.content if message.content else "")
+ message.function_call_to_json())
responses.append(
(message.content if message.content else "") + message.function_call_to_json()
)
return {
"user": user_id(self.user_name, self.user_email)[0],
@ -292,5 +300,5 @@ class Prompt(ABC):
"request_tokens": self._request_tokens,
"response_tokens": self._response_tokens,
"hash": self.hash,
"parent": self.parent
"parent": self.parent,
}

View File

@ -1,10 +1,11 @@
# pylint: disable=import-outside-toplevel
from dataclasses import asdict
import json
import os
from typing import List, Dict, Any, Optional
from tinydb import TinyDB, where, Query
from dataclasses import asdict
from typing import Any, Dict, List, Optional
from tinydb import Query, TinyDB, where
from tinydb.table import Table
from devchat.chat import Chat
from devchat.prompt import Prompt
from devchat.utils import get_logger
@ -25,22 +26,24 @@ class Store:
if not os.path.isdir(store_dir):
os.makedirs(store_dir)
self._graph_path = os.path.join(store_dir, 'prompts.graphml')
self._chat_list_path = os.path.join(store_dir, 'prompts_list.json')
self._db_path = os.path.join(store_dir, 'prompts.json')
self._graph_path = os.path.join(store_dir, "prompts.graphml")
self._chat_list_path = os.path.join(store_dir, "prompts_list.json")
self._db_path = os.path.join(store_dir, "prompts.json")
self._chat = chat
self._db = TinyDB(self._db_path)
self._db_meta = self._migrate_db()
self._topics_table = self._db.table('topics')
self._topics_table = self._db.table("topics")
if os.path.isfile(self._chat_list_path):
with open(self._chat_list_path, 'r', encoding="utf-8") as file:
with open(self._chat_list_path, "r", encoding="utf-8") as file:
self._chat_lists = json.loads(file.read())
elif os.path.isfile(self._graph_path):
# convert old graphml to new json
from xml.etree.ElementTree import ParseError
import networkx as nx
try:
graph = nx.read_graphml(self._graph_path)
@ -48,25 +51,25 @@ class Store:
self._chat_lists = []
for root in roots:
chat_list = [(root, graph.nodes[root]['timestamp'])]
chat_list = [(root, graph.nodes[root]["timestamp"])]
ancestors = nx.ancestors(graph, root)
for ancestor in ancestors:
chat_list.append((ancestor, graph.nodes[ancestor]['timestamp']))
chat_list.append((ancestor, graph.nodes[ancestor]["timestamp"]))
self._chat_lists.append(chat_list)
with open(self._chat_list_path, 'w', encoding="utf-8") as file:
with open(self._chat_list_path, "w", encoding="utf-8") as file:
file.write(json.dumps(self._chat_lists))
# rename graphml to json
os.rename(self._graph_path, self._graph_path + '.bak')
os.rename(self._graph_path, self._graph_path + ".bak")
# update topic table, add request and response fields
# new fields: user, date, request, responses, hash
visible_topics = self._topics_table.all()
for topic in visible_topics:
prompt = self.get_prompt(topic['root'])
prompt = self.get_prompt(topic["root"])
if not prompt:
continue
self._update_topic_fields(topic, prompt)
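A guess at the graph orientation, based on the ancestors() call above: in networkx, ancestors(G, n) are the nodes with a path to n, so prompt edges must point from reply toward the topic root:

import networkx as nx

graph = nx.DiGraph()
graph.add_edge("reply", "root")  # child -> parent
roots = [node for node, degree in graph.out_degree() if degree == 0]
print(roots)  # ['root']
print(sorted(nx.ancestors(graph, "root")))  # ['reply']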
@ -81,37 +84,38 @@ class Store:
self._initialize_topics_table()
def _update_topic_fields(self, topic, prompt):
topic['user'] = prompt.user_name
topic['date'] = prompt.timestamp
topic['request'] = prompt.request.content
topic['responses'] = prompt.responses[0].content if prompt.responses else ""
topic['hash'] = prompt.hash
if len(topic['request']) > 100:
topic['request'] = topic['request'][:100] + "..."
if len(topic['responses']) > 100:
topic['responses'] = topic['responses'][:100] + "..."
topic["user"] = prompt.user_name
topic["date"] = prompt.timestamp
topic["request"] = prompt.request.content
topic["responses"] = prompt.responses[0].content if prompt.responses else ""
topic["hash"] = prompt.hash
if len(topic["request"]) > 100:
topic["request"] = topic["request"][:100] + "..."
if len(topic["responses"]) > 100:
topic["responses"] = topic["responses"][:100] + "..."
def _migrate_db(self) -> Table:
"""
Migrate the database to the latest version.
"""
metadata = self._db.table('metadata')
metadata = self._db.table("metadata")
result = metadata.get(where("version").exists())
if not result or result["version"].startswith("0.1."):
result = metadata.get(where('version').exists())
if not result or result['version'].startswith('0.1.'):
def replace_response():
def transform(doc):
if '_new_messages' not in doc or 'response' not in doc['_new_messages']:
logger.error("Prompt %s does not match '_new_messages.response'",
doc['_hash'])
doc['_new_messages']['responses'] = doc['_new_messages'].pop('response')
if "_new_messages" not in doc or "response" not in doc["_new_messages"]:
logger.error(
"Prompt %s does not match '_new_messages.response'", doc["_hash"]
)
doc["_new_messages"]["responses"] = doc["_new_messages"].pop("response")
return transform
logger.info("Migrating database from %s to 0.2.0", result)
self._db.update(replace_response(),
Query()._new_messages.response.exists()) # pylint: disable=W0212
metadata.insert({'version': '0.2.0'})
self._db.update(replace_response(), Query()._new_messages.response.exists())
metadata.insert({"version": "0.2.0"})
return metadata
def _initialize_topics_table(self):
@ -120,18 +124,13 @@ class Store:
continue
first = chat_list[0]
last = chat_list[-1]
last = chat_list[-1]
topic = {
'root': first[0],
'latest_time': last[1],
'title': None,
'hidden': False
}
topic = {"root": first[0], "latest_time": last[1], "title": None, "hidden": False}
prompt = self.get_prompt(topic['root'])
prompt = self.get_prompt(topic["root"])
if not prompt:
logger.error("Prompt %s not found while selecting from the store", topic['root'])
logger.error("Prompt %s not found while selecting from the store", topic["root"])
continue
self._update_topic_fields(topic, prompt)
@ -145,17 +144,17 @@ class Store:
if chat_list[-1][0] == prompt.hash:
topic_hash = chat_list[0][0]
topic = next((t for t in self._topics_table if t['root'] == topic_hash), None)
topic = next((t for t in self._topics_table if t["root"] == topic_hash), None)
if topic:
topic['latest_time'] = max(topic.get('latest_time', 0), prompt.timestamp)
topic["latest_time"] = max(topic.get("latest_time", 0), prompt.timestamp)
self._topics_table.update(topic, doc_ids=[topic.doc_id])
break
else:
topic = {
'root': prompt.hash,
'latest_time': prompt.timestamp,
'title': None,
'hidden': False
"root": prompt.hash,
"latest_time": prompt.timestamp,
"title": None,
"hidden": False,
}
self._update_topic_fields(topic, prompt)
self._topics_table.insert(topic)
@ -187,12 +186,11 @@ class Store:
self._chat_lists.append([(prompt.hash, prompt.timestamp)])
self._update_topics_table(prompt)
with open(self._chat_list_path, 'w', encoding="utf-8") as file:
with open(self._chat_list_path, "w", encoding="utf-8") as file:
file.write(json.dumps(self._chat_lists))
return topic_hash
def get_prompt(self, prompt_hash: str) -> Prompt:
"""
Retrieve a prompt from the store.
@ -203,7 +201,7 @@ class Store:
Prompt: The retrieved prompt. None if the prompt is not found.
"""
# Retrieve the prompt object from TinyDB
prompt_data = self._db.search(where('_hash') == prompt_hash)
prompt_data = self._db.search(where("_hash") == prompt_hash)
if not prompt_data:
logger.warning("Prompt %s not found while retrieving from object store.", prompt_hash)
return None
@ -266,24 +264,25 @@ class Store:
List[Dict[str, Any]]: A list of dictionaries containing root prompts
with latest_time and title fields.
"""
visible_topics = self._topics_table.search(
where('hidden') == False) # pylint: disable=C0121
sorted_topics = sorted(visible_topics, key=lambda x: x['latest_time'], reverse=True)
visible_topics = self._topics_table.search(where("hidden") == False) # noqa: E712
sorted_topics = sorted(visible_topics, key=lambda x: x["latest_time"], reverse=True)
topics = []
for topic in sorted_topics[start:end]:
topics.append({
'root_prompt': {
'hash': topic['root'],
'user': topic['user'],
'date': topic['date'],
'request': topic['request'],
'responses': [topic['responses']],
},
'latest_time': topic['latest_time'],
'title': topic['title'],
'hidden': topic['hidden'],
})
topics.append(
{
"root_prompt": {
"hash": topic["root"],
"user": topic["user"],
"date": topic["date"],
"request": topic["request"],
"responses": [topic["responses"]],
},
"latest_time": topic["latest_time"],
"title": topic["title"],
"hidden": topic["hidden"],
}
)
return topics
def delete_prompt(self, prompt_hash: str) -> bool:
@ -316,13 +315,13 @@ class Store:
return False
# Update the topics table
self._topics_table.remove(where('root') == prompt_hash)
self._topics_table.remove(where("root") == prompt_hash)
# Remove the prompt from the database
self._db.remove(where('_hash') == prompt_hash)
self._db.remove(where("_hash") == prompt_hash)
# Save the graph
with open(self._chat_list_path, 'w', encoding="utf-8") as file:
with open(self._chat_list_path, "w", encoding="utf-8") as file:
file.write(json.dumps(self._chat_lists))
return True

View File

@ -1,19 +1,17 @@
# pylint: disable=import-outside-toplevel
import datetime
import getpass
import hashlib
import logging
import os
import re
import getpass
import socket
import subprocess
from typing import List, Tuple, Optional
import datetime
import hashlib
from typing import List, Optional, Tuple
log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# pylint: disable=invalid-name
log_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
encoding = None
def setup_logger(file_path: Optional[str] = None):
"""Utility function to set up a global file log handler."""
if file_path is None:
@ -28,7 +26,7 @@ def get_logger(name: str = None, handler: logging.Handler = None) -> logging.Log
local_logger = logging.getLogger(name)
# Default to 'INFO' if 'LOG_LEVEL' env is not set
log_level_str = os.getenv('LOG_LEVEL', 'INFO')
log_level_str = os.getenv("LOG_LEVEL", "INFO")
log_level = getattr(logging, log_level_str.upper(), logging.INFO)
local_logger.setLevel(log_level)
@ -55,9 +53,13 @@ def find_root_dir() -> Tuple[Optional[str], Optional[str]]:
repo_dir = None
try:
repo_dir = subprocess.run(["git", "rev-parse", "--show-toplevel"],
capture_output=True, text=True, check=True,
encoding='utf-8').stdout.strip()
repo_dir = subprocess.run(
["git", "rev-parse", "--show-toplevel"],
capture_output=True,
text=True,
check=True,
encoding="utf-8",
).stdout.strip()
if not os.path.isdir(repo_dir):
repo_dir = None
else:
@ -66,8 +68,9 @@ def find_root_dir() -> Tuple[Optional[str], Optional[str]]:
repo_dir = None
try:
result = subprocess.run(["svn", "info"],
capture_output=True, text=True, check=True, encoding='utf-8')
result = subprocess.run(
["svn", "info"], capture_output=True, text=True, check=True, encoding="utf-8"
)
if result.returncode == 0:
for line in result.stdout.splitlines():
if line.startswith("Working Copy Root Path: "):
@ -81,10 +84,10 @@ def find_root_dir() -> Tuple[Optional[str], Optional[str]]:
def add_gitignore(target_dir: str, *ignore_entries: str) -> None:
gitignore_path = os.path.join(target_dir, '.gitignore')
gitignore_path = os.path.join(target_dir, ".gitignore")
if os.path.exists(gitignore_path):
with open(gitignore_path, 'r', encoding='utf-8') as gitignore_file:
with open(gitignore_path, "r", encoding="utf-8") as gitignore_file:
gitignore_content = gitignore_file.read()
new_entries = []
@ -93,15 +96,15 @@ def add_gitignore(target_dir: str, *ignore_entries: str) -> None:
new_entries.append(entry)
if new_entries:
with open(gitignore_path, 'a', encoding='utf-8') as gitignore_file:
gitignore_file.write('\n# devchat\n')
with open(gitignore_path, "a", encoding="utf-8") as gitignore_file:
gitignore_file.write("\n# devchat\n")
for entry in new_entries:
gitignore_file.write(f'{entry}\n')
gitignore_file.write(f"{entry}\n")
else:
with open(gitignore_path, 'w', encoding='utf-8') as gitignore_file:
gitignore_file.write('# devchat\n')
with open(gitignore_path, "w", encoding="utf-8") as gitignore_file:
gitignore_file.write("# devchat\n")
for entry in ignore_entries:
gitignore_file.write(f'{entry}\n')
gitignore_file.write(f"{entry}\n")
def unix_to_local_datetime(unix_time) -> datetime.datetime:
@ -116,8 +119,8 @@ def unix_to_local_datetime(unix_time) -> datetime.datetime:
def get_user_info() -> Tuple[str, str]:
try:
cmd = ['git', 'config', 'user.name']
user_name = subprocess.check_output(cmd, encoding='utf-8').strip()
cmd = ["git", "config", "user.name"]
user_name = subprocess.check_output(cmd, encoding="utf-8").strip()
except Exception:
try:
user_name = getpass.getuser()
@ -126,17 +129,17 @@ def get_user_info() -> Tuple[str, str]:
user_name = user_dir.split(os.sep)[-1]
try:
cmd = ['git', 'config', 'user.email']
user_email = subprocess.check_output(cmd, encoding='utf-8').strip()
cmd = ["git", "config", "user.email"]
user_email = subprocess.check_output(cmd, encoding="utf-8").strip()
except Exception:
user_email = user_name + '@' + socket.gethostname()
user_email = user_name + "@" + socket.gethostname()
return user_name, user_email
def user_id(user_name, user_email) -> Tuple[str, str]:
user_str = f"{user_name} <{user_email}>"
user_hash = hashlib.sha1(user_str.encode('utf-8')).hexdigest()
user_hash = hashlib.sha1(user_str.encode("utf-8")).hexdigest()
return user_str, user_hash
@ -151,7 +154,7 @@ def parse_files(file_paths: List[str]) -> List[str]:
contents = []
for file_path in file_paths:
with open(file_path, 'r', encoding='utf-8') as file:
with open(file_path, "r", encoding="utf-8") as file:
content = file.read()
if not content:
raise ValueError(f"File {file_path} is empty.")
@ -161,7 +164,7 @@ def parse_files(file_paths: List[str]) -> List[str]:
def valid_hash(hash_str):
"""Check if a string is a valid hash value."""
pattern = re.compile(r'^[a-f0-9]{64}$') # for SHA-256 hash
pattern = re.compile(r"^[a-f0-9]{64}$") # for SHA-256 hash
return bool(pattern.match(hash_str))
@ -198,25 +201,24 @@ def update_dict(dict_to_update, key, value) -> dict:
return dict_to_update
def openai_message_tokens(messages: dict, model: str) -> int: # pylint: disable=unused-argument
def openai_message_tokens(messages: dict, model: str) -> int:
"""Returns the number of tokens used by a message."""
if not os.environ.get("USE_TIKTOKEN", False):
return len(str(messages))/4
return len(str(messages)) / 4
# pylint: disable=global-statement
global encoding
if not encoding:
import tiktoken
script_dir = os.path.dirname(os.path.realpath(__file__))
os.environ['TIKTOKEN_CACHE_DIR'] = os.path.join(script_dir, 'tiktoken_cache')
os.environ["TIKTOKEN_CACHE_DIR"] = os.path.join(script_dir, "tiktoken_cache")
try:
encoding = tiktoken.get_encoding("cl100k_base")
except Exception:
from tiktoken import registry
from tiktoken.registry import _find_constructors
from tiktoken.core import Encoding
from tiktoken.registry import _find_constructors
def get_encoding(name: str):
_find_constructors()
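Aside: the reformatted hunk above lazily builds a module-level tiktoken encoding and points TIKTOKEN_CACHE_DIR at a bundled cache so BPE files are not re-downloaded. A minimal, self-contained sketch of that pattern (names are mine; the // keeps the annotated int return, unlike the float division in the hunk):

import os

_encoding = None  # cached across calls, mirroring the module-level global above

def count_tokens(messages: dict) -> int:
    """Rough token count: tiktoken when enabled, else a 4-chars-per-token heuristic."""
    global _encoding
    if not os.environ.get("USE_TIKTOKEN"):
        return len(str(messages)) // 4
    if _encoding is None:
        import tiktoken
        # Point tiktoken at a local cache dir next to this file.
        cache_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tiktoken_cache")
        os.environ["TIKTOKEN_CACHE_DIR"] = cache_dir
        _encoding = tiktoken.get_encoding("cl100k_base")
    return len(_encoding.encode(str(messages)))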
View File
@ -1,8 +1,9 @@
import click
from devchat.workflow.command.update import update
from devchat.workflow.command.list import list_cmd
from devchat.workflow.command.env import env
from devchat.workflow.command.config import config_cmd
from devchat.workflow.command.env import env
from devchat.workflow.command.list import list_cmd
from devchat.workflow.command.update import update
@click.group(help="CLI for devchat workflow engine.")
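For context, the reordered imports above feed a click group; a minimal sketch of that wiring, with a hypothetical hello subcommand standing in for update/list_cmd/env/config_cmd:

import click

@click.group(help="CLI for devchat workflow engine.")
def workflow():
    pass

@click.command(name="hello", help="Hypothetical subcommand for illustration.")
def hello():
    click.echo("hello from the workflow CLI")

workflow.add_command(hello)  # the real module registers update, list_cmd, env, config_cmd

if __name__ == "__main__":
    workflow()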
View File
@ -1,6 +1,6 @@
import json
from pathlib import Path
import click
import oyaml as yaml
@ -10,7 +10,6 @@ from devchat.workflow.path import WORKFLOWS_BASE, WORKFLOWS_CONFIG_FILENAME
@click.command(help="Workflow configuration.", name="config")
@click.option("--json", "in_json", is_flag=True, help="Output in json format.")
def config_cmd(in_json: bool):
config_path = Path(WORKFLOWS_BASE) / WORKFLOWS_CONFIG_FILENAME
config_content = {}
if config_path.exists():
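The config_cmd body is cut off above; a sketch of one plausible completion, assuming JSON output when the flag is set and YAML otherwise (the config path here is hypothetical, standing in for WORKFLOWS_BASE / WORKFLOWS_CONFIG_FILENAME):

import json
from pathlib import Path

import click
import oyaml as yaml

@click.command(help="Workflow configuration.", name="config")
@click.option("--json", "in_json", is_flag=True, help="Output in json format.")
def config_cmd(in_json: bool):
    config_path = Path("workflows") / "config.yml"  # hypothetical path for the sketch
    config_content = {}
    if config_path.exists():
        with open(config_path, "r", encoding="utf-8") as f:
            config_content = yaml.safe_load(f) or {}
    # Assumed output behavior: JSON when flagged, YAML otherwise.
    if in_json:
        click.echo(json.dumps(config_content))
    else:
        click.echo(yaml.dump(config_content))

if __name__ == "__main__":
    config_cmd()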
View File
@ -1,14 +1,14 @@
# pylint: disable=invalid-name
"""
Commands for managing the python environment of workflows.
"""
import sys
from pathlib import Path
from typing import Optional, List
from typing import List, Optional
import click
from devchat.workflow.env_manager import PyEnvManager, MAMBA_PY_ENVS
from devchat.workflow.env_manager import MAMBA_PY_ENVS, PyEnvManager
def _get_all_env_names() -> List[str]:
@ -19,11 +19,7 @@ def _get_all_env_names() -> List[str]:
excludes = ["devchat", "devchat-ask", "devchat-commands"]
envs_path = Path(MAMBA_PY_ENVS)
envs = [
env.name
for env in envs_path.iterdir()
if env.is_dir() and env.name not in excludes
]
envs = [env.name for env in envs_path.iterdir() if env.is_dir() and env.name not in excludes]
return envs
@ -42,9 +38,7 @@ def list_envs():
required=False,
type=str,
)
@click.option(
"--all", "all_flag", help="Remove all the python envs of workflows.", is_flag=True
)
@click.option("--all", "all_flag", help="Remove all the python envs of workflows.", is_flag=True)
def remove(env_name: Optional[str] = None, all_flag: bool = False):
if not env_name and not all_flag:
click.echo("Please provide the name of the python env to remove.")
View File
@ -1,19 +1,19 @@
import json
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import List, Set, Tuple, Dict
from dataclasses import dataclass, asdict, field
from typing import Dict, List, Set, Tuple
import click
import oyaml as yaml
import yaml as pyyaml
from devchat.utils import get_logger
from devchat.workflow.namespace import get_prioritized_namespace_path
from devchat.workflow.path import COMMAND_FILENAMES
from devchat.utils import get_logger
logger = get_logger(__name__)
@dataclass
class WorkflowMeta:
name: str
@ -27,9 +27,7 @@ class WorkflowMeta:
return f"{'*' if self.active else ' '} {self.name} ({self.namespace})"
def iter_namespace(
ns_path: str, existing_names: Set[str]
) -> Tuple[List[WorkflowMeta], Set[str]]:
def iter_namespace(ns_path: str, existing_names: Set[str]) -> Tuple[List[WorkflowMeta], Set[str]]:
"""
Get all workflows under the namespace path.
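iter_namespace's body is elided above; a flat sketch of the dedup-across-namespaces idea its signature and docstring describe, with COMMAND_FILENAMES assumed to be a list of marker files and plain names standing in for WorkflowMeta:

import os
from typing import List, Set, Tuple

COMMAND_FILENAMES = ["command.yml"]  # assumption; stands in for the imported constant

def iter_namespace(ns_path: str, existing_names: Set[str]) -> Tuple[List[str], Set[str]]:
    """Collect workflow names under one namespace dir, skipping names claimed earlier."""
    found: List[str] = []
    names = set(existing_names)
    if not os.path.isdir(ns_path):
        return found, names
    for entry in sorted(os.listdir(ns_path)):
        wf_dir = os.path.join(ns_path, entry)
        has_command = any(os.path.isfile(os.path.join(wf_dir, f)) for f in COMMAND_FILENAMES)
        if os.path.isdir(wf_dir) and has_command and entry not in names:
            found.append(entry)
            names.add(entry)
    return found, names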
View File
@ -1,27 +1,25 @@
# pylint: disable=invalid-name
import os
import shutil
import tempfile
import zipfile
from typing import List, Optional, Tuple
from pathlib import Path
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Tuple
import click
import requests
from devchat.utils import get_logger
from devchat.workflow.path import (
CHAT_DIR,
CUSTOM_BASE,
WORKFLOWS_BASE,
WORKFLOWS_BASE_NAME,
CUSTOM_BASE,
)
from devchat.utils import get_logger
HAS_GIT = False
try:
from git import Repo, InvalidGitRepositoryError, GitCommandError
from git import GitCommandError, InvalidGitRepositoryError, Repo
except ImportError:
pass
else:
@ -254,9 +252,7 @@ def update_by_git(workflow_base: Path):
remote_main_hash = repo.commit(f"origin/{DEFAULT_BRANCH}").hexsha
if local_main_hash == remote_main_hash:
click.echo(
f"Local branch is up-to-date with remote {DEFAULT_BRANCH}. Skip update."
)
click.echo(f"Local branch is up-to-date with remote {DEFAULT_BRANCH}. Skip update.")
return
try:
@ -290,15 +286,11 @@ def copy_workflows_usr():
shutil.copytree(old_usr_dir, new_usr_dir)
click.echo(f"Copied {old_usr_dir} to {new_usr_dir} successfully.")
else:
click.echo(
f"Skip copying usr dir. old exists: {old_exists}, new exists: {new_exists}."
)
click.echo(f"Skip copying usr dir. old exists: {old_exists}, new exists: {new_exists}.")
@click.command(help="Update the workflow_base dir.")
@click.option(
"-f", "--force", is_flag=True, help="Force update the workflows to the latest main."
)
@click.option("-f", "--force", is_flag=True, help="Force update the workflows to the latest main.")
def update(force: bool):
click.echo(f"Updating wf repo... force: {force}")
click.echo(f"WORKFLOWS_BASE: {WORKFLOWS_BASE}")
View File
@ -1,14 +1,12 @@
# pylint: disable=invalid-name
import os
import sys
import subprocess
from typing import Optional, Dict
import sys
from typing import Dict, Optional
from .envs import MAMBA_BIN_PATH
from .path import MAMBA_PY_ENVS, MAMBA_ROOT
from .user_setting import USER_SETTINGS
from .schema import ExternalPyConf
from .user_setting import USER_SETTINGS
# CONDA_FORGE = [
# "https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/",
@ -28,8 +26,10 @@ def _get_external_envs() -> Dict[str, ExternalPyConf]:
return external_pythons
EXTERNAL_ENVS = _get_external_envs()
class PyEnvManager:
mamba_bin = MAMBA_BIN_PATH
mamba_root = MAMBA_ROOT
@ -82,15 +82,11 @@ class PyEnvManager:
]
env = os.environ.copy()
env.pop("PYTHONPATH")
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
) as proc:
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) as proc:
proc.wait()
if proc.returncode != 0:
print(
f"Failed to install requirements: {requirements_file}", flush=True
)
print(f"Failed to install requirements: {requirements_file}", flush=True)
return False
return True
@ -149,9 +145,7 @@ class PyEnvManager:
f"python={py_version}",
"-y",
]
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
) as proc:
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
proc.wait()
if proc.returncode != 0:
@ -178,9 +172,7 @@ class PyEnvManager:
self.mamba_root,
"-y",
]
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
) as proc:
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
proc.wait()
if proc.returncode != 0:
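The one-line Popen calls above share a run-and-check shape; a sketch of it, using pop("PYTHONPATH", None) so a missing variable cannot raise (the hunk's bare env.pop("PYTHONPATH") would KeyError if PYTHONPATH is unset):

import os
import subprocess
from typing import List

def run_quiet(cmd: List[str]) -> bool:
    """Run a command with captured output and report success (pattern from PyEnvManager)."""
    env = os.environ.copy()
    env.pop("PYTHONPATH", None)  # drop the host interpreter's paths before spawning
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) as proc:
        proc.wait()
        return proc.returncode == 0

# e.g. run_quiet(["mamba", "create", "-n", "my-env", "python=3.8", "-y"])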
View File
@ -1,6 +1,7 @@
"""
Explicitly define the environment variables used in the workflow engine.
"""
import os
PYTHON_PATH = os.environ.get("PYTHONPATH", "")
View File
@ -4,15 +4,17 @@ Namespace management for workflows
import os
from typing import List
from pydantic import BaseModel, Extra, ValidationError
import oyaml as yaml
from pydantic import BaseModel, Extra, ValidationError
from devchat.utils import get_logger
from .path import (
CUSTOM_BASE,
MERICO_WORKFLOWS,
COMMUNITY_WORKFLOWS,
CUSTOM_BASE,
CUSTOM_CONFIG_FILE,
MERICO_WORKFLOWS,
)
logger = get_logger(__name__)
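get_prioritized_namespace_path is imported elsewhere but not shown here; purely as an illustration, one plausible precedence over the three path constants above (custom first, then merico, then community — an assumption, not the shipped logic, and the signature is mine):

from typing import List

def get_prioritized_namespace_path(custom_paths: List[str], merico: str, community: str) -> List[str]:
    # Assumed precedence: user-defined custom namespaces shadow merico,
    # which shadows community.
    return [*custom_paths, merico, community]

print(get_prioritized_namespace_path(
    ["/workflows/custom/my_team"], "/workflows/merico", "/workflows/community"
))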
View File
@ -1,7 +1,7 @@
import re
from typing import Optional, List, Dict, Union
from typing import Dict, List, Optional, Union
from pydantic import BaseModel, validator, Extra, ValidationError
from pydantic import BaseModel, Extra, ValidationError, validator
class WorkflowPyConf(BaseModel):
@ -10,7 +10,7 @@ class WorkflowPyConf(BaseModel):
env_name: Optional[str] # python env name, will use the workflow name if not set
@validator("version")
def validate_version(cls, value): # pylint: disable=no-self-argument
def validate_version(cls, value):
pattern = r"^\d+\.\d+(\.\d+)?$"
if not re.match(pattern, value):
raise ValidationError(
@ -41,7 +41,7 @@ class WorkflowConfig(BaseModel):
help: Optional[Union[str, Dict[str, str]]] = None
@validator("input_required", pre=True)
def to_boolean(cls, value): # pylint: disable=no-self-argument
def to_boolean(cls, value):
return value.lower() == "required"
class Config:
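The validators above follow standard pydantic v1 patterns; a self-contained sketch (raising ValueError, which pydantic wraps into a ValidationError, rather than raising ValidationError directly as the hunk does):

import re

from pydantic import BaseModel, ValidationError, validator

class PyConf(BaseModel):
    version: str
    input_required: bool = False

    @validator("version")
    def validate_version(cls, value):
        # Accept "X.Y" or "X.Y.Z", matching the pattern in the hunk above.
        if not re.match(r"^\d+\.\d+(\.\d+)?$", value):
            raise ValueError(f"Invalid version: {value}")
        return value

    @validator("input_required", pre=True)
    def to_boolean(cls, value):
        # "required" -> True, anything else -> False.
        return value.lower() == "required"

print(PyConf(version="3.8.1", input_required="required"))
try:
    PyConf(version="abc")
except ValidationError as err:
    print(err)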
View File
@ -1,15 +1,14 @@
# pylint: disable=invalid-name
import json
import os
import shlex
import subprocess
import sys
import threading
import subprocess
import shlex
import json
from typing import Dict, Tuple, List
from enum import Enum
from .schema import WorkflowConfig, RuntimeParameter
from typing import Dict, List, Tuple
from .path import WORKFLOWS_BASE
from .schema import RuntimeParameter, WorkflowConfig
class BuiltInVars(str, Enum):
@ -47,9 +46,7 @@ class WorkflowStep:
"""
return self._kwargs.get("run", "")
def _setup_env(
self, wf_config: WorkflowConfig, rt_param: RuntimeParameter
) -> Dict[str, str]:
def _setup_env(self, wf_config: WorkflowConfig, rt_param: RuntimeParameter) -> Dict[str, str]:
"""
Setup the environment variables for the subprocess.
"""
@ -96,8 +93,7 @@ class WorkflowStep:
if BuiltInVars.workflow_python in command_raw:
if not rt_param.workflow_python:
raise ValueError(
"The command uses $workflow_python, "
"but the workflow_python is not set yet."
"The command uses $workflow_python, " "but the workflow_python is not set yet."
)
args = []
@ -129,10 +125,7 @@ class WorkflowStep:
return args
def run(
self, wf_config: WorkflowConfig, rt_param: RuntimeParameter
) -> Tuple[int, str, str]:
def run(self, wf_config: WorkflowConfig, rt_param: RuntimeParameter) -> Tuple[int, str, str]:
"""
Run the step in a subprocess.
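The $workflow_python guard above belongs to a command-expansion step; a sketch of that expansion with shlex, assuming $workflow_python is the only built-in variable handled (the real step also prepares env vars and other built-ins):

import shlex
from typing import List

WORKFLOW_PYTHON = "$workflow_python"  # built-in variable name, per BuiltInVars above

def build_args(command_raw: str, workflow_python: str) -> List[str]:
    """Expand the built-in $workflow_python variable, then split the command line."""
    if WORKFLOW_PYTHON in command_raw:
        if not workflow_python:
            raise ValueError(
                "The command uses $workflow_python, but the workflow_python is not set yet."
            )
        command_raw = command_raw.replace(WORKFLOW_PYTHON, workflow_python)
    return shlex.split(command_raw)

print(build_args("$workflow_python -m my_tool --flag", "/envs/wf/bin/python"))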
View File
@ -1,7 +1,9 @@
from pathlib import Path
import oyaml as yaml
from .path import USER_SETTINGS_FILENAME, WORKFLOWS_BASE
from .schema import UserSettings
from .path import WORKFLOWS_BASE, USER_SETTINGS_FILENAME
def _load_user_settings() -> UserSettings:
@ -20,4 +22,5 @@ def _load_user_settings() -> UserSettings:
return UserSettings()
USER_SETTINGS = _load_user_settings()
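_load_user_settings's body is mostly elided above; a sketch of the load-or-default pattern its tail suggests, with a stand-in UserSettings model (the real schema lives in .schema, and the settings filename here is hypothetical):

from pathlib import Path

import oyaml as yaml
from pydantic import BaseModel

class UserSettings(BaseModel):
    class Config:
        extra = "allow"  # stand-in model; real fields are defined in .schema

def load_user_settings(settings_file: Path) -> UserSettings:
    """Parse the YAML settings file, falling back to defaults when absent or invalid."""
    if not settings_file.exists():
        return UserSettings()
    try:
        with open(settings_file, "r", encoding="utf-8") as f:
            data = yaml.safe_load(f) or {}
        return UserSettings(**data)
    except Exception:
        return UserSettings()

USER_SETTINGS = load_user_settings(Path("workflows") / "user_settings.yml")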
View File
@ -1,15 +1,14 @@
# pylint: disable=invalid-name
import os
import sys
from typing import Optional, Tuple, List, Dict
import oyaml as yaml
from .step import WorkflowStep
from .schema import WorkflowConfig, RuntimeParameter
from .path import COMMAND_FILENAMES
from .namespace import get_prioritized_namespace_path
from typing import Dict, List, Optional, Tuple
from .env_manager import PyEnvManager, EXTERNAL_ENVS
import oyaml as yaml
from .env_manager import EXTERNAL_ENVS, PyEnvManager
from .namespace import get_prioritized_namespace_path
from .path import COMMAND_FILENAMES
from .schema import RuntimeParameter, WorkflowConfig
from .step import WorkflowStep
class Workflow:
@ -47,9 +46,7 @@ class Workflow:
workflow_name = striped.split()[0][1:]
# remove the trigger prefix and the workflow name
actual_input = user_input.replace(
f"{Workflow.TRIGGER_PREFIX}{workflow_name}", "", 1
)
actual_input = user_input.replace(f"{Workflow.TRIGGER_PREFIX}{workflow_name}", "", 1)
return workflow_name, actual_input
@staticmethod
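The trigger parsing above splits "/name rest" into a workflow name and the remaining input; a runnable sketch, assuming TRIGGER_PREFIX is "/":

class Workflow:
    TRIGGER_PREFIX = "/"  # assumption: slash-triggered workflows, per the parsing above

    @staticmethod
    def parse_trigger(user_input: str):
        """Split "/name rest of input" into (workflow_name, actual_input)."""
        stripped = user_input.strip()
        if not stripped.startswith(Workflow.TRIGGER_PREFIX):
            return None, user_input
        workflow_name = stripped.split()[0][1:]
        # Remove only the first occurrence of the trigger prefix plus the name.
        actual_input = user_input.replace(f"{Workflow.TRIGGER_PREFIX}{workflow_name}", "", 1)
        return workflow_name, actual_input

print(Workflow.parse_trigger("/commit write a message"))  # ('commit', ' write a message')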