workflows/libs/llm_api/openai.py

122 lines
4.7 KiB
Python
Raw Normal View History

# flake8: noqa: E402
import re
2023-12-13 14:18:08 +08:00
import os
import sys
import json
import openai
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from ide_services.services import log_warn
def _try_remove_markdown_block_flag(content):
"""
如果content是一个markdown块则删除它的头部```xxx和尾部```
"""
# 定义正则表达式模式用于匹配markdown块的头部和尾部
2023-12-08 18:28:36 +08:00
pattern = r"^\s*```\s*(\w+)\s*\n(.*?)\n\s*```\s*$"
# 使用re模块进行匹配
match = re.search(pattern, content, re.DOTALL | re.MULTILINE)
2023-12-08 18:28:36 +08:00
if match:
# 如果匹配成功则提取出markdown块的内容并返回
2023-12-08 18:38:12 +08:00
_ = match.group(1) # language
markdown_content = match.group(2)
return markdown_content.strip()
else:
# 如果匹配失败,则返回原始内容
return content
2023-12-08 18:28:36 +08:00
def chat_completion_stream(messages, llm_config, error_out: bool = True, stream_out=False):
    """
    Call the OpenAI ChatCompletion API in streaming mode and collect the reply.

    Retries up to three times on connection/timeout errors.

    Args:
        messages: list of chat messages to send to the model.
        llm_config: dict of ChatCompletion API parameters (model, temperature, ...).
            The dict is not modified; "stream" and "timeout" are overridden on a copy.
        error_out: when True, print non-retryable API errors to stderr.
        stream_out: when True, echo content chunks to stdout as they arrive.

    Returns:
        dict with keys "content", "function_name" and "parameters"
        (note: a dict, not a str), or None when the request ultimately fails.
    """
    for try_times in range(3):
        try:
            client = openai.OpenAI(
                api_key=os.environ.get("OPENAI_API_KEY", None),
                base_url=os.environ.get("OPENAI_API_BASE", None),
            )
            # Copy before overriding so the caller's dict is not mutated.
            config = dict(llm_config)
            config["stream"] = True
            config["timeout"] = 8
            response = client.chat.completions.create(messages=messages, **config)
            response_result = {"content": None, "function_name": None, "parameters": ""}
            for chunk in response:  # pylint: disable=E1133
                chunk = chunk.dict()
                delta = chunk["choices"][0]["delta"]
                if "tool_calls" in delta and delta["tool_calls"]:
                    tool_call = delta["tool_calls"][0]["function"]
                    if tool_call.get("name", None):
                        response_result["function_name"] = tool_call["name"]
                    if tool_call.get("arguments", None):
                        # Argument JSON arrives in fragments; concatenate them.
                        response_result["parameters"] += tool_call["arguments"]
                if delta.get("content", None):
                    if stream_out:
                        print(delta["content"], end="", flush=True)
                    if response_result["content"]:
                        response_result["content"] += delta["content"]
                    else:
                        response_result["content"] = delta["content"]
            return response_result
        except (openai.APIConnectionError, openai.APITimeoutError) as err:
            log_warn(f"Exception: {err.__class__.__name__}: {err}")
            if try_times >= 2:
                # All retries exhausted.
                return None
            continue
        except openai.APIError as err:
            # Non-retryable API error: report (if requested) and give up.
            if error_out:
                print("Exception:", err, file=sys.stderr, flush=True)
            return None
    return None
def chat_completion_no_stream_return_json(messages, llm_config, error_out: bool = True):
    """
    Call the chat completion API and parse the reply as JSON, retrying up to
    three times when the reply is not valid JSON.

    Args:
        messages (List[str]): chat messages to send to the model.
        llm_config (Dict[str, Any]): ChatCompletion API configuration.
        error_out (bool, optional): when True, print error details to stderr.
            Defaults to True.

    Returns:
        Dict[str, Any]: the parsed JSON object, or None when the API call
        fails or no valid JSON is produced after three attempts.
    """
    response = None
    for _ in range(3):
        # Forward error_out so the caller's preference also governs API-error output.
        response = chat_completion_stream(messages, llm_config, error_out=error_out)
        if response is None:
            return None
        try:
            # 1106-era models may wrap JSON in a ```json ... ``` fence; strip it.
            response_content = _try_remove_markdown_block_flag(response["content"])
            return json.loads(response_content)
        except json.JSONDecodeError:
            # Malformed JSON: log and retry with a fresh completion.
            log_warn(f"JSONDecodeError: {response['content']}")
            continue
        except Exception as err:
            if error_out:
                print("Exception: ", err, file=sys.stderr, flush=True)
            return None
    if error_out:
        print("Not valid json response:", response["content"], file=sys.stderr, flush=True)
    return None