11# Rename this file to .env once you have filled in the below environment variables!
2+ # Whether to enable file logging
3+ USAGE_LOG_FILE = true
4+ # Include this environment variable if you want more logging for debugging locally
5+ LOG_LEVEL = debug
6+ # Operating environment, different from NODE_ENV. NODE_ENV is determined at build time, while this variable is used for enabling certain features in different environments
7+ # development | production | test
8+ OPERATING_ENV = production
9+ # Resource file storage location
10+ STORAGE_DIR = /public/uploads
11+ # Maximum upload size for attachments
12+ MAX_UPLOAD_SIZE_MB = 5
213
3- # Get your GROQ API Key here -
4- # https://console.groq.com/keys
5- # You only need this environment variable set if you want to use Groq models
6- GROQ_API_KEY =
7-
8- # Get your HuggingFace API Key here -
9- # https://huggingface.co/settings/tokens
10- # You only need this environment variable set if you want to use HuggingFace models
11- HuggingFace_API_KEY =
12-
13-
14- # Get your Open AI API Key by following these instructions -
15- # https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
16- # You only need this environment variable set if you want to use GPT models
17- OPENAI_API_KEY =
18-
19- # Get your Anthropic API Key in your account settings -
20- # https://console.anthropic.com/settings/keys
21- # You only need this environment variable set if you want to use Claude models
22- ANTHROPIC_API_KEY =
23-
24- # Get your OpenRouter API Key in your account settings -
25- # https://openrouter.ai/settings/keys
26- # You only need this environment variable set if you want to use OpenRouter models
27- OPEN_ROUTER_API_KEY =
14+ # Example Context Values for qwen2.5-coder:32b
15+ #
16+ # DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
17+ # DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
18+ # DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
19+ # DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
20+ DEFAULT_NUM_CTX =
2821
29- # Get your Google Generative AI API Key by following these instructions -
30- # https://console.cloud.google.com/apis/credentials
31- # You only need this environment variable set if you want to use Google Generative AI models
32- GOOGLE_GENERATIVE_AI_API_KEY =
22+ # LLM Configuration Options
23+ # Enabled model providers, currently supporting Anthropic, Cohere, Deepseek, Google, Groq, HuggingFace, Hyperbolic, Mistral, Ollama, OpenAI, OpenRouter, OpenAILike, Perplexity, xAI, Together, LMStudio, AmazonBedrock, Github
24+ LLM_PROVIDER =
3325
34- # You only need this environment variable set if you want to use oLLAMA models
26+ # BASE URL of the current model provider, some providers require this to be set, such as OpenAILike, Ollama, LMStudio
3527# DONT USE http://localhost:11434 due to IPV6 issues
3628# USE EXAMPLE http://127.0.0.1:11434
37- OLLAMA_API_BASE_URL =
38-
39- # You only need this environment variable set if you want to use OpenAI Like models
40- OPENAI_LIKE_API_BASE_URL =
41-
42- # You only need this environment variable set if you want to use Together AI models
43- TOGETHER_API_BASE_URL =
44-
45- # You only need this environment variable set if you want to use DeepSeek models through their API
46- DEEPSEEK_API_KEY =
47-
48- # Get your OpenAI Like API Key
49- OPENAI_LIKE_API_KEY =
50-
51- # Get your Together API Key
52- TOGETHER_API_KEY =
53-
54- # You only need this environment variable set if you want to use Hyperbolic models
55- # Get your Hyperbolics API Key at https://app.hyperbolic.xyz/settings
56- # baseURL="https://api.hyperbolic.xyz/v1/chat/completions"
57- HYPERBOLIC_API_KEY =
58- HYPERBOLIC_API_BASE_URL =
59-
60- # Get your Mistral API Key by following these instructions -
61- # https://console.mistral.ai/api-keys/
62- # You only need this environment variable set if you want to use Mistral models
63- MISTRAL_API_KEY =
64-
65- # Get the Cohere Api key by following these instructions -
66- # https://dashboard.cohere.com/api-keys
67- # You only need this environment variable set if you want to use Cohere models
68- COHERE_API_KEY =
29+ PROVIDER_BASE_URL =
6930
70- # Get LMStudio Base URL from LM Studio Developer Console
71- # Make sure to enable CORS
72- # DONT USE http://localhost:1234 due to IPV6 issues
73- # Example: http://127.0.0.1:1234
74- LMSTUDIO_API_BASE_URL =
75-
76- # Get your xAI API key
77- # https://x.ai/api
78- # You only need this environment variable set if you want to use xAI models
79- XAI_API_KEY =
80-
81- # Get your Perplexity API Key here -
82- # https://www.perplexity.ai/settings/api
83- # You only need this environment variable set if you want to use Perplexity models
84- PERPLEXITY_API_KEY =
85-
86- # Get your AWS configuration
31+ # API KEY of the current provider, used to request the model API. Some providers do not require this to be set.
32+ # Specifically, if the model provider is AmazonBedrock, this should be a JSON string, reference:
8733# https://console.aws.amazon.com/iam/home
8834# The JSON should include the following keys:
8935# - region: The AWS region where Bedrock is available.
@@ -92,60 +38,30 @@ PERPLEXITY_API_KEY=
9238# - sessionToken (optional): Temporary session token if using an IAM role or temporary credentials.
9339# Example JSON:
9440# {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "sessionToken": "yourSessionToken"}
95- AWS_BEDROCK_CONFIG =
41+ PROVIDER_API_KEY =
9642
97- # 是否开启文件日志
98- USAGE_LOG_FILE = false
99- # Include this environment variable if you want more logging for debugging locally
100- LOG_LEVEL = debug
43+ # MODEL used for page generation (should correspond to LLM_PROVIDER)
44+ LLM_DEFAULT_MODEL =
10145
102- # Example Context Values for qwen2.5-coder:32b
103- #
104- # DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
105- # DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
106- # DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
107- # DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
108- DEFAULT_NUM_CTX =
46+ # MODEL used for auxiliary page generation, such as summarization and pre-analysis. (should correspond to LLM_PROVIDER)
47+ LLM_MINOR_MODEL =
10948
11049# Get your Serper API Key https://serper.dev/
11150SERPER_API_KEY =
11251
11352# Get your Weather API Key https://www.weatherapi.com/my/
11453WEATHER_API_KEY =
11554
116- # LLM Configuration Options
117-
118- # Default LLM provider to use (e.g.,OpenAILike,OpenAI, Anthropic, Mistral)
119- LLM_DEFAULT_PROVIDER =
120-
121- # 生成页面所使用的 MODEL(应该与 LLM_DEFAULT_PROVIDER 相对应)
122- LLM_DEFAULT_MODEL =
123-
124- # 用于辅助页面生成所使用的 MODEL,例如总结和预分析。(应该与 LLM_DEFAULT_PROVIDER 相对应)
125- LLM_MINOR_MODEL =
126-
127- # Comma-separated list of enabled providers (empty means all providers)
128- # Example: OpenAILike,OpenAI,Anthropic,Mistral
129- LLM_ENABLED_PROVIDERS =
130-
131- # Logto 集成所需环境变量
132- # Logto 地址
55+ # Environment variables required for Logto integration
56+ # Logto endpoint
13357LOGTO_ENDPOINT =
134- # Logto 应用 ID
58+ # Logto application ID
13559LOGTO_APP_ID =
136- # Logto 应用密钥
60+ # Logto application secret
13761LOGTO_APP_SECRET =
138- # 应用基础 URL,根据实际部署环境修改
62+ # Application base URL, modify according to actual deployment environment
13963LOGTO_BASE_URL = http://localhost:5173
140- # 随机任意的 36 位字符串,用于加密 Logto 的 cookie。
64+ # Random 36-character string, used to encrypt Logto cookies.
14165LOGTO_COOKIE_SECRET =
142- # 是否在开发环境中启用 Logto 认证,设置为 false 则在开发环境不强制认证
143- LOGTO_ENABLE_DEV = false
144- # 运行环境,与 NODE_ENV 有所不同, NODE_ENV 在打包时就已确定,而此变量用于某些功能在不同环境下的开放
145- # development | production | test
146- OPERATING_ENV = production
147-
148- # 资源文件存储位置
149- STORAGE_DIR = /public/uploads
150- # 附件上传的最大大小
151- MAX_UPLOAD_SIZE_MB = 5
66+ # Whether to enable Logto authentication in development environment, set to false to not enforce authentication in development
67+ LOGTO_ENABLE = false