NiwWin committed on
Commit
203991d
·
verified ·
1 Parent(s): fba7b67

Update librechat.yaml

Browse files
Files changed (1) hide show
  1. librechat.yaml +41 -41
librechat.yaml CHANGED
@@ -7,86 +7,86 @@ cache: true
7
  # Definition of custom endpoints
8
  endpoints:
9
  custom:
 
10
  # Mistral AI API
11
- - name: "Mistral" # Unique name for the endpoint
12
  # For `apiKey` and `baseURL`, you can use environment variables that you define.
13
  # recommended environment variables:
14
- apiKey: "${MISTRAL_API_KEY}"
15
- baseURL: "https://api.mistral.ai/v1"
16
 
17
  # Models configuration
18
- models:
19
  # List of default models to use. At least one value is required.
20
- default: ["mistral-tiny", "mistral-small", "mistral-medium"]
21
  # Fetch option: Set to true to fetch models from API.
22
- fetch: true # Defaults to false.
23
 
24
  # Optional configurations
25
 
26
  # Title Conversation setting
27
- titleConvo: true # Set to true to enable title conversation
28
 
29
  # Title Method: Choose between "completion" or "functions".
30
- titleMethod: "completion" # Defaults to "completion" if omitted.
31
 
32
  # Title Model: Specify the model to use for titles.
33
- titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
34
 
35
  # Summarize setting: Set to true to enable summarization.
36
- summarize: false
37
 
38
  # Summary Model: Specify the model to use if summarization is enabled.
39
- summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
40
 
41
  # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
42
- forcePrompt: false
43
 
44
  # The label displayed for the AI model in messages.
45
- modelDisplayLabel: "Mistral" # Default is "AI" when not set.
46
 
47
  # Add additional parameters to the request. Default params will be overwritten.
48
- addParams:
49
- safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
50
 
51
  # Drop default parameters from the request. See default params in guide linked below.
52
- dropParams: ["stop", "temperature", "top_p"]
53
  # - stop # dropped since it's not recognized by Mistral AI API
54
  # `temperature` and `top_p` are removed to allow Mistral AI API defaults to be used:
55
  # - temperature
56
  # - top_p
57
 
58
  # OpenRouter.ai Example
59
- - name: "OpenRouter"
60
  # For `apiKey` and `baseURL`, you can use environment variables that you define.
61
  # recommended environment variables:
62
  # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
63
- apiKey: "${OPENROUTER_KEY}"
64
- baseURL: "https://openrouter.ai/api/v1"
65
- models:
66
- default: ["nousresearch/nous-capybara-7b:free", "mistralai/mistral-7b-instruct:free", "huggingfaceh4/zephyr-7b-beta:free", "openchat/openchat-7b:free", "gryphe/mythomist-7b:free", "undi95/toppy-m-7b:free", "openrouter/cinematika-7b:free", "openrouter/auto", "nousresearch/nous-capybara-7b", "mistralai/mistral-7b-instruct", "huggingfaceh4/zephyr-7b-beta", "openchat/openchat-7b", "gryphe/mythomist-7b", "openrouter/cinematika-7b", "rwkv/rwkv-5-world-3b", "recursal/rwkv-5-3b-ai-town", "jondurbin/bagel-34b", "jebcarter/psyfighter-13b", "koboldai/psyfighter-13b-2", "neversleep/noromaid-mixtral-8x7b-instruct", "nousresearch/nous-hermes-llama2-13b", "meta-llama/codellama-34b-instruct", "phind/phind-codellama-34b", "intel/neural-chat-7b", "nousresearch/nous-hermes-2-mixtral-8x7b-dpo", "nousresearch/nous-hermes-2-mixtral-8x7b-sft", "haotian-liu/llava-13b", "nousresearch/nous-hermes-2-vision-7b", "meta-llama/llama-2-13b-chat", "gryphe/mythomax-l2-13b", "nousresearch/nous-hermes-llama2-70b", "teknium/openhermes-2-mistral-7b", "teknium/openhermes-2.5-mistral-7b", "undi95/remm-slerp-l2-13b", "undi95/toppy-m-7b", "01-ai/yi-34b-chat", "01-ai/yi-34b", "01-ai/yi-6b", "togethercomputer/stripedhyena-nous-7b", "togethercomputer/stripedhyena-hessian-7b", "mistralai/mixtral-8x7b", "nousresearch/nous-hermes-yi-34b", "open-orca/mistral-7b-openorca", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-1106", "openai/gpt-3.5-turbo-16k", "openai/gpt-4-1106-preview", "openai/gpt-4", "openai/gpt-4-32k", "openai/gpt-4-vision-preview", "openai/gpt-3.5-turbo-instruct", "google/palm-2-chat-bison", "google/palm-2-codechat-bison", "google/palm-2-chat-bison-32k", "google/palm-2-codechat-bison-32k", "google/gemini-pro", "google/gemini-pro-vision", "perplexity/pplx-70b-online", "perplexity/pplx-7b-online", "perplexity/pplx-7b-chat", "perplexity/pplx-70b-chat", "meta-llama/llama-2-70b-chat", "nousresearch/nous-capybara-34b", "jondurbin/airoboros-l2-70b", "austism/chronos-hermes-13b", "migtissera/synthia-70b", 
"pygmalionai/mythalion-13b", "undi95/remm-slerp-l2-13b-6k", "xwin-lm/xwin-lm-70b", "gryphe/mythomax-l2-13b-8k", "alpindale/goliath-120b ", "lizpreciatior/lzlv-70b-fp16-hf", "neversleep/noromaid-20b", "mistralai/mixtral-8x7b-instruct", "cognitivecomputations/dolphin-mixtral-8x7b", "anthropic/claude-2", "anthropic/claude-2.0", "anthropic/claude-instant-v1", "mancer/weaver", "mistralai/mistral-tiny", "mistralai/mistral-small", "mistralai/mistral-medium"]
67
- fetch: true
68
- titleConvo: true
69
- titleModel: "gpt-3.5-turbo"
70
- summarize: false
71
- summaryModel: "gpt-3.5-turbo"
72
- forcePrompt: false
73
- modelDisplayLabel: "OpenRouter"
74
 
75
- - name: "Reverse Proxy"
76
  # For `apiKey` and `baseURL`, you can use environment variables that you define.
77
- # recommended environment variables:
78
  # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
79
- apiKey: "user_provided"
80
- baseURL: "user_provided"
81
- models:
82
- default: ["gpt-3.5-turbo"]
83
- fetch: true
84
- titleConvo: true
85
- titleModel: "gpt-3.5-turbo"
86
- summarize: false
87
- summaryModel: "gpt-3.5-turbo"
88
- forcePrompt: false
89
- modelDisplayLabel: "AI"
90
 
91
  # See the Custom Configuration Guide for more information:
92
  # https://docs.librechat.ai/install/configuration/custom_config.html
 
7
  # Definition of custom endpoints
8
  endpoints:
9
  custom:
10
+
11
  # Mistral AI API
12
+ # - name: "Mistral" # Unique name for the endpoint
13
  # For `apiKey` and `baseURL`, you can use environment variables that you define.
14
  # recommended environment variables:
15
+ # apiKey: "${MISTRAL_API_KEY}"
16
+ # baseURL: "https://api.mistral.ai/v1"
17
 
18
  # Models configuration
19
+ # models:
20
  # List of default models to use. At least one value is required.
21
+ # default: ["mistral-tiny", "mistral-small", "mistral-medium"]
22
  # Fetch option: Set to true to fetch models from API.
23
+ # fetch: true # Defaults to false.
24
 
25
  # Optional configurations
26
 
27
  # Title Conversation setting
28
+ # titleConvo: true # Set to true to enable title conversation
29
 
30
  # Title Method: Choose between "completion" or "functions".
31
+ # titleMethod: "completion" # Defaults to "completion" if omitted.
32
 
33
  # Title Model: Specify the model to use for titles.
34
+ # titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
35
 
36
  # Summarize setting: Set to true to enable summarization.
37
+ # summarize: false
38
 
39
  # Summary Model: Specify the model to use if summarization is enabled.
40
+ # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
41
 
42
  # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
43
+ # forcePrompt: false
44
 
45
  # The label displayed for the AI model in messages.
46
+ # modelDisplayLabel: "Mistral" # Default is "AI" when not set.
47
 
48
  # Add additional parameters to the request. Default params will be overwritten.
49
+ # addParams:
50
+ # safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
51
 
52
  # Drop default parameters from the request. See default params in guide linked below.
53
+ # dropParams: ["stop", "temperature", "top_p"]
54
  # - stop # dropped since it's not recognized by Mistral AI API
55
  # `temperature` and `top_p` are removed to allow Mistral AI API defaults to be used:
56
  # - temperature
57
  # - top_p
58
 
59
  # OpenRouter.ai Example
60
+ # - name: "OpenRouter"
61
  # For `apiKey` and `baseURL`, you can use environment variables that you define.
62
  # recommended environment variables:
63
  # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
64
+ # apiKey: "${OPENROUTER_KEY}"
65
+ # baseURL: "https://openrouter.ai/api/v1"
66
+ # default: ["nousresearch/nous-capybara-7b:free", "mistralai/mistral-7b-instruct:free", "huggingfaceh4/zephyr-7b-beta:free", "openchat/openchat-7b:free", "gryphe/mythomist-7b:free", "undi95/toppy-m-7b:free", "openrouter/cinematika-7b:free", "openrouter/auto", "nousresearch/nous-capybara-7b", "mistralai/mistral-7b-instruct", "huggingfaceh4/zephyr-7b-beta", "openchat/openchat-7b", "gryphe/mythomist-7b", "openrouter/cinematika-7b", "rwkv/rwkv-5-world-3b", "recursal/rwkv-5-3b-ai-town", "jondurbin/bagel-34b", "jebcarter/psyfighter-13b", "koboldai/psyfighter-13b-2", "neversleep/noromaid-mixtral-8x7b-instruct", "nousresearch/nous-hermes-llama2-13b", "meta-llama/codellama-34b-instruct", "phind/phind-codellama-34b", "intel/neural-chat-7b", "nousresearch/nous-hermes-2-mixtral-8x7b-dpo", "nousresearch/nous-hermes-2-mixtral-8x7b-sft", "haotian-liu/llava-13b", "nousresearch/nous-hermes-2-vision-7b", "meta-llama/llama-2-13b-chat", "gryphe/mythomax-l2-13b", "nousresearch/nous-hermes-llama2-70b", "teknium/openhermes-2-mistral-7b", "teknium/openhermes-2.5-mistral-7b", "undi95/remm-slerp-l2-13b", "undi95/toppy-m-7b", "01-ai/yi-34b-chat", "01-ai/yi-34b", "01-ai/yi-6b", "togethercomputer/stripedhyena-nous-7b", "togethercomputer/stripedhyena-hessian-7b", "mistralai/mixtral-8x7b", "nousresearch/nous-hermes-yi-34b", "open-orca/mistral-7b-openorca", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-1106", "openai/gpt-3.5-turbo-16k", "openai/gpt-4-1106-preview", "openai/gpt-4", "openai/gpt-4-32k", "openai/gpt-4-vision-preview", "openai/gpt-3.5-turbo-instruct", "google/palm-2-chat-bison", "google/palm-2-codechat-bison", "google/palm-2-chat-bison-32k", "google/palm-2-codechat-bison-32k", "google/gemini-pro", "google/gemini-pro-vision", "perplexity/pplx-70b-online", "perplexity/pplx-7b-online", "perplexity/pplx-7b-chat", "perplexity/pplx-70b-chat", "meta-llama/llama-2-70b-chat", "nousresearch/nous-capybara-34b", "jondurbin/airoboros-l2-70b", "austism/chronos-hermes-13b", 
"migtissera/synthia-70b", "pygmalionai/mythalion-13b", "undi95/remm-slerp-l2-13b-6k", "xwin-lm/xwin-lm-70b", "gryphe/mythomax-l2-13b-8k", "alpindale/goliath-120b ", "lizpreciatior/lzlv-70b-fp16-hf", "neversleep/noromaid-20b", "mistralai/mixtral-8x7b-instruct", "cognitivecomputations/dolphin-mixtral-8x7b", "anthropic/claude-2", "anthropic/claude-2.0", "anthropic/claude-instant-v1", "mancer/weaver", "mistralai/mistral-tiny", "mistralai/mistral-small", "mistralai/mistral-medium"]
67
+ # fetch: true
68
+ # titleConvo: true
69
+ # titleModel: "gpt-3.5-turbo"
70
+ # summarize: false
71
+ # summaryModel: "gpt-3.5-turbo"
72
+ # forcePrompt: false
73
+ # modelDisplayLabel: "OpenRouter"
 
74
 
75
+ #- name: "Reverse Proxy"
76
  # For `apiKey` and `baseURL`, you can use environment variables that you define.
77
+ # # recommended environment variables:
78
  # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
79
+ # apiKey: "user_provided"
80
+ # baseURL: "user_provided"
81
+ # models:
82
+ # default: ["gpt-3.5-turbo"]
83
+ # fetch: true
84
+ # titleConvo: true
85
+ # titleModel: "gpt-3.5-turbo"
86
+ # summarize: false
87
+ # summaryModel: "gpt-3.5-turbo"
88
+ # forcePrompt: false
89
+ # modelDisplayLabel: "AI"
90
 
91
  # See the Custom Configuration Guide for more information:
92
  # https://docs.librechat.ai/install/configuration/custom_config.html