---
# Default configuration for ChatAtomicFlow — a flow that wraps an LLM
# accessed through an API (via LiteLLM) as its tool.
_target_: flow_modules.aiflows.ChatFlowModule.ChatAtomicFlow.instantiate_from_default_config

name: ChatAtomicFlow
description: "Flow which uses as tool an LLM through an API"
enable_cache: true

# API-call retry policy.
n_api_retries: 6
wait_time_between_retries: 20  # NOTE(review): presumably seconds — confirm against backend

# Role labels used when building the chat message history.
system_name: system
user_name: user
assistant_name: assistant

# LLM backend (LiteLLM) and its sampling parameters.
backend:
  _target_: aiflows.backends.llm_lite.LiteLLMBackend
  api_infos: ???  # mandatory (OmegaConf missing-value marker) — must be supplied at instantiation
  model_name: "gpt-3.5-turbo"
  n: 1
  max_tokens: 2000
  temperature: 0.3
  top_p: 0.2
  frequency_penalty: 0
  presence_penalty: 0
  stream: true

# System prompt template (no default template — expected to be overridden).
system_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt

# Template for the very first human message.
init_human_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt
  template: "{{query}}"
  input_variables:
    - "query"

# Template for subsequent human messages.
human_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt
  template: "{{query}}"
  input_variables:
    - "query"

# Inputs expected once the flow has been initialized.
# NOTE(review): reconstructed as a top-level key (indentation was lost in the
# original paste) — matches the upstream ChatAtomicFlow default config layout.
input_interface_initialized:
  - "query"

# Template used to render the incoming query message (no default template).
query_message_prompt_template:
  _target_: aiflows.prompt_template.JinjaPrompt

# Optional truncation of the chat history; null means no limit.
previous_messages:
  first_k: null
  last_k: null

output_interface:
  - "api_output"