# pylint: disable=import-outside-toplevel
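"""Prompt routing for the devchat CLI."""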
import json
import sys
from typing import List, Optional

from devchat.workflow.workflow import Workflow


def _get_model_and_config(
        model: Optional[str],
        config_str: Optional[str]):
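    """Resolve the chat model and merge its stored config with any JSON overrides in config_str."""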
    from devchat._cli.utils import init_dir, get_model_config

    _1, user_chat_dir = init_dir()
    model, config = get_model_config(user_chat_dir, model)

    parameters_data = config.dict(exclude_unset=True)
    if config_str:
        config_data = json.loads(config_str)
        parameters_data.update(config_data)
    return model, parameters_data


def _load_tool_functions(functions: Optional[str]):
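    """Load tool (function-calling) definitions from a JSON file; return None if unavailable."""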
    try:
        if functions:
            with open(functions, 'r', encoding="utf-8") as f_file:
                return json.load(f_file)
        return None
    except Exception:
        # If the functions file is missing or not valid JSON, fall back to no tool functions.
        return None


def _load_instruction_contents(content: str, instruct: Optional[List[str]]):
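    """Gather instructions from the given files plus any workflow instructions for the content."""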
    from devchat.engine import load_workflow_instruction
    from devchat.utils import parse_files

    instruct_contents = parse_files(instruct)
    command_instructions = load_workflow_instruction(content)
    if command_instructions is not None:
        instruct_contents.extend(command_instructions)

    return instruct_contents


def before_prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
                  instruct: Optional[List[str]], context: Optional[List[str]],
                  model: Optional[str], config_str: Optional[str] = None,
                  functions: Optional[str] = None, function_name: Optional[str] = None,
                  not_store: Optional[bool] = False):
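    """Prepare an Assistant with the given prompt inputs and return (model, assistant, content)."""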
    from devchat.assistant import Assistant
    from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
    from devchat.store import Store
    from devchat.utils import parse_files
    from devchat._cli.utils import init_dir
    from devchat._cli.errors import MissContentInPromptException

    repo_chat_dir, _1 = init_dir()

    if content is None:
        content = sys.stdin.read()

    if content == '':
        raise MissContentInPromptException()

    instruct_contents = _load_instruction_contents(content, instruct)
    context_contents = parse_files(context)
    tool_functions = _load_tool_functions(functions)

    model, parameters_data = _get_model_and_config(model, config_str)
    max_input_tokens = parameters_data.get("max_input_tokens", 4000)

    openai_config = OpenAIChatConfig(model=model, **parameters_data)
    chat = OpenAIChat(openai_config)
    chat_store = Store(repo_chat_dir, chat)

    assistant = Assistant(chat, chat_store, max_input_tokens, not not_store)
    assistant.make_prompt(
        request=content,
        instruct_contents=instruct_contents,
        context_contents=context_contents,
        functions=tool_functions,
        parent=parent,
        references=reference,
        function_name=function_name
    )

    return model, assistant, content


def llm_prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
               instruct: Optional[List[str]], context: Optional[List[str]],
               model: Optional[str], config_str: Optional[str] = None,
               functions: Optional[str] = None, function_name: Optional[str] = None,
               not_store: Optional[bool] = False):
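    """Send the prompt to the LLM and stream the response to stdout."""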
    from devchat._cli.utils import handle_errors

    with handle_errors():
        _1, assistant, _3 = before_prompt(
            content, parent, reference, instruct, context,
            model, config_str, functions, function_name, not_store
        )

        print(assistant.prompt.formatted_header())
        for response in assistant.iterate_response():
            print(response, end='', flush=True)


def llm_commmand(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
                 instruct: Optional[List[str]], context: Optional[List[str]],
                 model: Optional[str], config_str: Optional[str] = None):
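    """Run the prompt as a devchat command; exit 0 on success, -1 on failure."""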
    from devchat.engine import run_command
    from devchat._cli.utils import handle_errors

    with handle_errors():
        model, assistant, content = before_prompt(
            content, parent, reference, instruct, context, model, config_str, None, None, True
        )

        print(assistant.prompt.formatted_header())
        command_result = run_command(
            model_name=model,
            history_messages=assistant.prompt.messages,
            input_text=content,
            parent_hash=parent,
            auto_fun=False)
        if command_result is not None:
            sys.exit(0)

        print("Failed to run the command.")
        print(command_result)
        sys.exit(-1)


def llm_route(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
              instruct: Optional[List[str]], context: Optional[List[str]],
              model: Optional[str], config_str: Optional[str] = None,
              auto: Optional[bool] = False):
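    """Route the prompt to a workflow, a command, or a plain LLM response."""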
    from devchat.engine import run_command
    from devchat._cli.utils import handle_errors

    with handle_errors():
        model, assistant, content = before_prompt(
            content, parent, reference, instruct, context, model, config_str, None, None, True
        )

        name, user_input = Workflow.parse_trigger(content)
        workflow = Workflow.load(name) if name else None
        if workflow:
            print(assistant.prompt.formatted_header())

            return_code = 0
            if workflow.should_show_help(user_input):
                doc = workflow.get_help_doc(user_input)
                print(doc)
            else:
                # run the workflow
                workflow.setup(
                    model_name=model,
                    user_input=user_input,
                    history_messages=assistant.prompt.messages,
                    parent_hash=parent,
                )
                return_code = workflow.run_steps()

            sys.exit(return_code)

        print(assistant.prompt.formatted_header())
        command_result = run_command(
            model_name=model,
            history_messages=assistant.prompt.messages,
            input_text=content,
            parent_hash=parent,
            auto_fun=auto)
        if command_result is not None:
            sys.exit(command_result[0])

        # Neither a workflow nor a command handled the input; stream the LLM response.
        for response in assistant.iterate_response():
            print(response, end='', flush=True)