add new route command

bobo.yang 2023-12-18 21:49:58 +08:00
parent 217a4782e9
commit aac8aff366
6 changed files with 233 additions and 67 deletions

View File

@@ -2,10 +2,12 @@ from .log import log
from .prompt import prompt
from .run import run
from .topic import topic
from .route import route
__all__ = [
'log',
'prompt',
'run',
'topic'
'topic',
'route'
]

View File

@@ -8,6 +8,7 @@ from devchat._cli import log
from devchat._cli import prompt
from devchat._cli import run
from devchat._cli import topic
from devchat._cli import route
logger = get_logger(__name__)
click.rich_click.USE_MARKDOWN = True
@@ -24,3 +25,4 @@ main.add_command(prompt)
main.add_command(log)
main.add_command(run)
main.add_command(topic)
main.add_command(route)

View File

@@ -8,6 +8,7 @@ from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
from devchat.store import Store
from devchat.utils import parse_files
from devchat._cli.utils import handle_errors, init_dir, get_model_config
from devchat._cli.router import llm_prompt
@click.command()
@@ -28,13 +29,11 @@ from devchat._cli.utils import handle_errors, init_dir, get_model_config
help='Specify the function name when the content is the output of a function.')
@click.option('-ns', '--not-store', is_flag=True, default=False, required=False,
help='Do not save the conversation to the store.')
@click.option('-a', '--auto', is_flag=True, default=False, required=False,
help='Answer question by function-calling.')
def prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
functions: Optional[str] = None, function_name: Optional[str] = None,
not_store: Optional[bool] = False, auto: Optional[bool] = False):
not_store: Optional[bool] = False):
"""
This command performs interactions with the specified large language model (LLM)
by sending prompts and receiving responses.
@@ -68,51 +67,15 @@ def prompt(content: Optional[str], parent: Optional[str], reference: Optional[Li
```
"""
repo_chat_dir, user_chat_dir = init_dir()
with handle_errors():
if content is None:
content = click.get_text_stream('stdin').read()
if content == '':
return
instruct_contents = parse_files(instruct)
context_contents = parse_files(context)
model, config = get_model_config(repo_chat_dir, user_chat_dir, model)
parameters_data = config.dict(exclude_unset=True)
if config_str:
config_data = json.loads(config_str)
parameters_data.update(config_data)
openai_config = OpenAIChatConfig(model=model, **parameters_data)
chat = OpenAIChat(openai_config)
chat_store = Store(repo_chat_dir, chat)
assistant = Assistant(chat, chat_store, config.max_input_tokens, not not_store)
functions_data = None
if functions is not None:
with open(functions, 'r', encoding="utf-8") as f_file:
functions_data = json.load(f_file)
assistant.make_prompt(content, instruct_contents, context_contents, functions_data,
parent=parent, references=reference,
function_name=function_name)
click.echo(assistant.prompt.formatted_header())
command_result = run_command(
openai_config,
model,
assistant.prompt.messages,
content,
parent,
context_contents,
auto)
if command_result is not None:
sys.exit(command_result[0])
for response in assistant.iterate_response():
click.echo(response, nl=False)
llm_prompt(
content,
parent,
reference,
instruct,
context,
model,
config_str,
functions,
function_name,
not_store)
sys.exit(0)
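A hedged usage sketch of `devchat prompt` after this change, for orientation only; note that the `-a`/`--auto` flag has moved to the new `devchat route` command, and the question below is a placeholder:

```bash
# Send a one-off prompt without saving the conversation (-ns/--not-store).
devchat prompt -ns "What is the capital of France?"

# Content may also come from stdin, since it is read there when no argument is given.
echo "What is the capital of France?" | devchat prompt
```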

View File

@@ -0,0 +1,69 @@
import sys
from typing import List, Optional
import rich_click as click
from devchat._cli.router import llm_route
@click.command()
@click.argument('content', required=False)
@click.option('-p', '--parent', help='Input the parent prompt hash to continue the conversation.')
@click.option('-r', '--reference', multiple=True,
help='Input one or more specific previous prompts to include in the current prompt.')
@click.option('-i', '--instruct', multiple=True,
help='Add one or more files to the prompt as instructions.')
@click.option('-c', '--context', multiple=True,
help='Add one or more files to the prompt as a context.')
@click.option('-m', '--model', help='Specify the model to use for the prompt.')
@click.option('--config', 'config_str',
help='Specify a JSON string to overwrite the default configuration for this prompt.')
@click.option('-a', '--auto', is_flag=True, default=False, required=False,
help='Answer question by function-calling.')
def route(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
auto: Optional[bool] = False):
"""
This command routes a request to the specified large language model (LLM),
sending the prompt and receiving the response; with `-a`/`--auto`, the question
may be answered by function calling.
Examples
--------
To send a multi-line message to the LLM, use the here-doc syntax:
```bash
devchat route << 'EOF'
What is the capital of France?
Can you tell me more about its history?
EOF
```
Note the quotes around EOF in the first line, to prevent the shell from expanding variables.
Configuration
-------------
DevChat CLI reads configuration from `~/.chat/config.yml`
(if `~/.chat` is not accessible, it will try `.chat` in your current Git or SVN root directory).
You can edit the file to change the default configuration.
To use OpenAI's APIs, you must set an API key via the environment variable `OPENAI_API_KEY`.
Run the following command with your API key:
```bash
export OPENAI_API_KEY="sk-..."
```
"""
llm_route(
content,
parent,
reference,
instruct,
context,
model,
config_str,
auto
)
sys.exit(0)
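A hedged usage sketch of the new `devchat route` command, built only from the options defined above; the model name and the questions are placeholder values:

```bash
# Let the engine decide how to answer: with -a/--auto the question may be
# handled by function calling; otherwise the LLM response is printed.
devchat route -a -m gpt-4 "What is the capital of France?"

# Multi-line input uses the same here-doc syntax as `devchat prompt`.
devchat route << 'EOF'
What is the capital of France?
Can you tell me more about its history?
EOF
```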

View File

@@ -0,0 +1,116 @@
import json
import sys
from typing import List, Optional
import rich_click as click
from devchat.engine import run_command
from devchat.assistant import Assistant
from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
from devchat.store import Store
from devchat.utils import parse_files
from devchat._cli.utils import handle_errors, init_dir, get_model_config
def before_prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
functions: Optional[str] = None, function_name: Optional[str] = None,
not_store: Optional[bool] = False):
repo_chat_dir, user_chat_dir = init_dir()
if content is None:
content = click.get_text_stream('stdin').read()
if content == '':
return
instruct_contents = parse_files(instruct)
context_contents = parse_files(context)
model, config = get_model_config(repo_chat_dir, user_chat_dir, model)
parameters_data = config.dict(exclude_unset=True)
if config_str:
config_data = json.loads(config_str)
parameters_data.update(config_data)
openai_config = OpenAIChatConfig(model=model, **parameters_data)
chat = OpenAIChat(openai_config)
chat_store = Store(repo_chat_dir, chat)
assistant = Assistant(chat, chat_store, config.max_input_tokens, not not_store)
functions_data = None
if functions is not None:
with open(functions, 'r', encoding="utf-8") as f_file:
functions_data = json.load(f_file)
assistant.make_prompt(content, instruct_contents, context_contents, functions_data,
parent=parent, references=reference,
function_name=function_name)
return openai_config, model, assistant, content, context_contents
def llm_prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
functions: Optional[str] = None, function_name: Optional[str] = None,
not_store: Optional[bool] = False):
with handle_errors():
_1, _2, assistant, _3, _4 = before_prompt(
content, parent, reference, instruct, context, model, config_str, functions, function_name, not_store
)
click.echo(assistant.prompt.formatted_header())
for response in assistant.iterate_response():
click.echo(response, nl=False)
def llm_commmand(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None):
with handle_errors():
openai_config, model, assistant, content, context_contents = before_prompt(
content, parent, reference, instruct, context, model, config_str, None, None, True
)
click.echo(assistant.prompt.formatted_header())
command_result = run_command(
openai_config,
model,
assistant.prompt.messages,
content,
parent,
context_contents,
False)
if command_result is not None:
sys.exit(0)
click.echo("run command fail.")
click.echo(command_result)
sys.exit(-1)
def llm_route(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
auto: Optional[bool] = False):
with handle_errors():
openai_config, model, assistant, content, context_contents = before_prompt(
content, parent, reference, instruct, context, model, config_str, None, None, True
)
click.echo(assistant.prompt.formatted_header())
command_result = run_command(
openai_config,
model,
assistant.prompt.messages,
content,
parent,
context_contents,
auto)
if command_result is not None:
sys.exit(command_result[0])
for response in assistant.iterate_response():
click.echo(response, nl=False)

View File

@@ -2,7 +2,7 @@ import json
import os
import shutil
import sys
from typing import List
from typing import List, Optional
import rich_click as click
try:
from git import Repo, GitCommandError
@@ -12,6 +12,7 @@ from devchat._cli.utils import init_dir, handle_errors, valid_git_repo, clone_gi
from devchat._cli.utils import download_and_extract_workflow
from devchat.engine import Namespace, CommandParser, RecursivePrompter
from devchat.utils import get_logger
from devchat._cli.router import llm_commmand
logger = get_logger(__name__)
@@ -24,7 +25,21 @@ logger = get_logger(__name__)
help='List commands recursively.')
@click.option('--update-sys', 'update_sys_flag', is_flag=True, default=False,
help='Pull the `sys` command directory from the DevChat repository.')
def run(command: str, list_flag: bool, recursive_flag: bool, update_sys_flag: bool):
@click.option('-p', '--parent', help='Input the parent prompt hash to continue the conversation.')
@click.option('-r', '--reference', multiple=True,
help='Input one or more specific previous prompts to include in the current prompt.')
@click.option('-i', '--instruct', multiple=True,
help='Add one or more files to the prompt as instructions.')
@click.option('-c', '--context', multiple=True,
help='Add one or more files to the prompt as a context.')
@click.option('-m', '--model', help='Specify the model to use for the prompt.')
@click.option('--config', 'config_str',
help='Specify a JSON string to overwrite the default configuration for this prompt.')
def run(command: str, list_flag: bool, recursive_flag: bool, update_sys_flag: bool,
parent: Optional[str], reference: Optional[List[str]],
instruct: Optional[List[str]], context: Optional[List[str]],
model: Optional[str], config_str: Optional[str] = None,
auto: Optional[bool] = False):
"""
Operate the workflow engine of DevChat.
"""
@@ -43,13 +58,12 @@ def run(command: str, list_flag: bool, recursive_flag: bool, update_sys_flag: bo
if update_sys_flag:
sys_dir = os.path.join(workflows_dir, 'sys')
git_urls = [
'https://gitlab.com/devchat-ai/workflows.git',
'https://gitee.com/devchat-ai/workflows.git',
'https://github.com/devchat-ai/workflows.git'
'https://github.com/devchat-ai/workflows.git',
'https://gitlab.com/devchat-ai/workflows.git'
]
zip_urls = [
'https://gitlab.com/devchat-ai/workflows/-/archive/main/workflows-main.zip',
'https://codeload.github.com/devchat-ai/workflows/zip/refs/heads/main'
'https://codeload.github.com/devchat-ai/workflows/zip/refs/heads/main',
'https://gitlab.com/devchat-ai/workflows/-/archive/main/workflows-main.zip'
]
_clone_or_pull_git_repo(sys_dir, git_urls, zip_urls)
return
@@ -69,15 +83,15 @@ def run(command: str, list_flag: bool, recursive_flag: bool, update_sys_flag: bo
return
if command:
cmd = commander.parse(command)
if not cmd:
click.echo(f"Error: Failed to find command: {command}", err=True)
sys.exit(1)
if not cmd.steps:
prompter = RecursivePrompter(namespace)
click.echo(prompter.run(command))
else:
click.echo(json.dumps(cmd.dict()))
llm_commmand(
command,
parent,
reference,
instruct,
context,
model,
config_str
)
return
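A hedged usage sketch of the extended `devchat run` command, based on the options and behavior added above; the workflow name and model are placeholder values:

```bash
# Refresh the bundled `sys` workflows (GitHub is now tried before GitLab).
devchat run --update-sys

# Run a workflow command: commands with steps are now executed through the
# LLM router instead of being printed as JSON; `example_workflow` is a placeholder.
devchat run -m gpt-4 example_workflow
```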