Skip to content

Commit f2df793

Browse files
committed
perf: simplified AI provider's settings
1 parent 5ec38df commit f2df793

39 files changed

+261
-751
lines changed

.env.example

Lines changed: 38 additions & 122 deletions
Original file line numberDiff line numberDiff line change
@@ -1,89 +1,35 @@
11
# Rename this file to .env once you have filled in the below environment variables!
2+
# Whether to enable file logging
3+
USAGE_LOG_FILE=true
4+
# Include this environment variable if you want more logging for debugging locally
5+
LOG_LEVEL=debug
6+
# Operating environment, different from NODE_ENV. NODE_ENV is determined at build time, while this variable is used for enabling certain features in different environments
7+
# development | production | test
8+
OPERATING_ENV=production
9+
# Resource file storage location
10+
STORAGE_DIR=/public/uploads
11+
# Maximum upload size for attachments
12+
MAX_UPLOAD_SIZE_MB=5
213

3-
# Get your GROQ API Key here -
4-
# https://console.groq.com/keys
5-
# You only need this environment variable set if you want to use Groq models
6-
GROQ_API_KEY=
7-
8-
# Get your HuggingFace API Key here -
9-
# https://huggingface.co/settings/tokens
10-
# You only need this environment variable set if you want to use HuggingFace models
11-
HuggingFace_API_KEY=
12-
13-
14-
# Get your Open AI API Key by following these instructions -
15-
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
16-
# You only need this environment variable set if you want to use GPT models
17-
OPENAI_API_KEY=
18-
19-
# Get your Anthropic API Key in your account settings -
20-
# https://console.anthropic.com/settings/keys
21-
# You only need this environment variable set if you want to use Claude models
22-
ANTHROPIC_API_KEY=
23-
24-
# Get your OpenRouter API Key in your account settings -
25-
# https://openrouter.ai/settings/keys
26-
# You only need this environment variable set if you want to use OpenRouter models
27-
OPEN_ROUTER_API_KEY=
14+
# Example Context Values for qwen2.5-coder:32b
15+
#
16+
# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
17+
# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
18+
# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
19+
# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
20+
DEFAULT_NUM_CTX=
2821

29-
# Get your Google Generative AI API Key by following these instructions -
30-
# https://console.cloud.google.com/apis/credentials
31-
# You only need this environment variable set if you want to use Google Generative AI models
32-
GOOGLE_GENERATIVE_AI_API_KEY=
22+
# LLM Configuration Options
23+
# Enabled model providers, currently supporting Anthropic, Cohere, Deepseek, Google, Groq, HuggingFace, Hyperbolic, Mistral, Ollama, OpenAI, OpenRouter, OpenAILike, Perplexity, xAI, Together, LMStudio, AmazonBedrock, GitHub
24+
LLM_PROVIDER=
3325

34-
# You only need this environment variable set if you want to use oLLAMA models
26+
# BASE URL of the current model provider, some providers require this to be set, such as OpenAILike, Ollama, LMStudio
3527
# DON'T use http://localhost:11434 due to IPv6 issues
3628
# USE EXAMPLE http://127.0.0.1:11434
37-
OLLAMA_API_BASE_URL=
38-
39-
# You only need this environment variable set if you want to use OpenAI Like models
40-
OPENAI_LIKE_API_BASE_URL=
41-
42-
# You only need this environment variable set if you want to use Together AI models
43-
TOGETHER_API_BASE_URL=
44-
45-
# You only need this environment variable set if you want to use DeepSeek models through their API
46-
DEEPSEEK_API_KEY=
47-
48-
# Get your OpenAI Like API Key
49-
OPENAI_LIKE_API_KEY=
50-
51-
# Get your Together API Key
52-
TOGETHER_API_KEY=
53-
54-
# You only need this environment variable set if you want to use Hyperbolic models
55-
#Get your Hyperbolics API Key at https://app.hyperbolic.xyz/settings
56-
#baseURL="https://api.hyperbolic.xyz/v1/chat/completions"
57-
HYPERBOLIC_API_KEY=
58-
HYPERBOLIC_API_BASE_URL=
59-
60-
# Get your Mistral API Key by following these instructions -
61-
# https://console.mistral.ai/api-keys/
62-
# You only need this environment variable set if you want to use Mistral models
63-
MISTRAL_API_KEY=
64-
65-
# Get the Cohere Api key by following these instructions -
66-
# https://dashboard.cohere.com/api-keys
67-
# You only need this environment variable set if you want to use Cohere models
68-
COHERE_API_KEY=
29+
PROVIDER_BASE_URL=
6930

70-
# Get LMStudio Base URL from LM Studio Developer Console
71-
# Make sure to enable CORS
72-
# DONT USE http://localhost:1234 due to IPV6 issues
73-
# Example: http://127.0.0.1:1234
74-
LMSTUDIO_API_BASE_URL=
75-
76-
# Get your xAI API key
77-
# https://x.ai/api
78-
# You only need this environment variable set if you want to use xAI models
79-
XAI_API_KEY=
80-
81-
# Get your Perplexity API Key here -
82-
# https://www.perplexity.ai/settings/api
83-
# You only need this environment variable set if you want to use Perplexity models
84-
PERPLEXITY_API_KEY=
85-
86-
# Get your AWS configuration
31+
# API key of the current provider, used to call the model API. Some providers do not require this to be set.
32+
# Specifically, if the model provider is AmazonBedrock, this should be a JSON string, reference:
8733
# https://console.aws.amazon.com/iam/home
8834
# The JSON should include the following keys:
8935
# - region: The AWS region where Bedrock is available.
@@ -92,60 +38,30 @@ PERPLEXITY_API_KEY=
9238
# - sessionToken (optional): Temporary session token if using an IAM role or temporary credentials.
9339
# Example JSON:
9440
# {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "sessionToken": "yourSessionToken"}
95-
AWS_BEDROCK_CONFIG=
41+
PROVIDER_API_KEY=
9642

97-
# 是否开启文件日志
98-
USAGE_LOG_FILE=false
99-
# Include this environment variable if you want more logging for debugging locally
100-
LOG_LEVEL=debug
43+
# MODEL used for page generation (should correspond to LLM_PROVIDER)
44+
LLM_DEFAULT_MODEL=
10145

102-
# Example Context Values for qwen2.5-coder:32b
103-
#
104-
# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
105-
# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
106-
# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
107-
# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
108-
DEFAULT_NUM_CTX=
46+
# MODEL used for auxiliary page generation, such as summarization and pre-analysis. (should correspond to LLM_PROVIDER)
47+
LLM_MINOR_MODEL=
10948

11049
# Get your Serper API Key https://serper.dev/
11150
SERPER_API_KEY=
11251

11352
# Get your Weather API Key https://www.weatherapi.com/my/
11453
WEATHER_API_KEY=
11554

116-
# LLM Configuration Options
117-
118-
# Default LLM provider to use (e.g.,OpenAILike,OpenAI, Anthropic, Mistral)
119-
LLM_DEFAULT_PROVIDER=
120-
121-
# 生成页面所使用的 MODEL(应该与 LLM_DEFAULT_PROVIDER 相对应)
122-
LLM_DEFAULT_MODEL=
123-
124-
# 用于辅助页面生成所使用的 MODEL,例如总结和预分析。(应该与 LLM_DEFAULT_PROVIDER 相对应)
125-
LLM_MINOR_MODEL=
126-
127-
# Comma-separated list of enabled providers (empty means all providers)
128-
# Example: OpenAILike,OpenAI,Anthropic,Mistral
129-
LLM_ENABLED_PROVIDERS=
130-
131-
# Logto 集成所需环境变量
132-
# Logto 地址
55+
# Environment variables required for Logto integration
56+
# Logto endpoint
13357
LOGTO_ENDPOINT=
134-
# Logto 应用 ID
58+
# Logto application ID
13559
LOGTO_APP_ID=
136-
# Logto 应用密钥
60+
# Logto application secret
13761
LOGTO_APP_SECRET=
138-
# 应用基础 URL,根据实际部署环境修改
62+
# Application base URL, modify according to actual deployment environment
13963
LOGTO_BASE_URL=http://localhost:5173
140-
# 随机任意的 36 位字符串,用于加密 Logto 的 cookie。
64+
# Random 36-character string, used to encrypt Logto cookies.
14165
LOGTO_COOKIE_SECRET=
142-
# 是否在开发环境中启用 Logto 认证,设置为 false 则在开发环境不强制认证
143-
LOGTO_ENABLE_DEV=false
144-
# 运行环境,与 NODE_ENV 有所不同, NODE_ENV 在打包时就已确定,而此变量用于某些功能在不同环境下的开放
145-
# development | production | test
146-
OPERATING_ENV=production
147-
148-
# 资源文件存储位置
149-
STORAGE_DIR=/public/uploads
150-
# 附件上传的最大大小
151-
MAX_UPLOAD_SIZE_MB=5
66+
# Whether to enable Logto authentication; set to false to not enforce authentication
67+
LOGTO_ENABLE=false

Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ FROM node:20.18.0-alpine AS runtime
3333
WORKDIR /app
3434

3535
ENV NODE_ENV=production
36-
ENV LOGTO_ENABLE_DEV=false
36+
ENV LOGTO_ENABLE=false
3737
ENV PORT=3000
3838
ENV HOST=0.0.0.0
3939

README.md

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,9 @@ docker run -d \
2626
--name upage \
2727
--restart unless-stopped \
2828
-p 3000:3000 \
29-
-e LLM_DEFAULT_PROVIDER=OpenAILike \
30-
-e OPENAI_LIKE_API_KEY=your-openai-like-api-key \
29+
-e LLM_PROVIDER=OpenAILike \
30+
-e PROVIDER_BASE_URL=your-provider-base-url \
31+
-e PROVIDER_API_KEY=your-openai-like-api-key \
3132
-e LLM_DEFAULT_MODEL=your-default-model \
3233
-e LLM_MINOR_MODEL=your-minor-model \
3334
-v ./data:/app/data \
@@ -37,8 +38,9 @@ docker run -d \
3738
```
3839

3940
其中参数说明如下:
40-
- `-e LLM_DEFAULT_PROVIDER=OpenAILike`:设置默认的 LLM 提供商为 OpenAILike,即兼容 OpenAI 的 API 接口。
41-
- `-e OPENAI_LIKE_API_KEY=your-openai-like-api-key`:设置 OpenAILike 的 API 密钥。
41+
- `-e LLM_PROVIDER=OpenAILike`:设置默认的 LLM 提供商为 OpenAILike,即兼容 OpenAI 的 API 接口。
42+
- `-e PROVIDER_BASE_URL=your-provider-base-url`:设置 LLM 提供商的 API 基础 URL,部分提供商需要设置此项,例如 OpenAILike, Ollama, LMStudio。
43+
- `-e PROVIDER_API_KEY=your-openai-like-api-key`:设置 LLM 提供商的 API 密钥,大部分提供商需要设置此项。
4244
- `-e LLM_DEFAULT_MODEL=your-default-model`:设置默认的 LLM 模型,用于构建页面。
4345
- `-e LLM_MINOR_MODEL=your-minor-model`:设置次要的 LLM 模型,用于执行其他任务。
4446
- `-v ./data:/app/data`:挂载数据目录

app/components/header/Header.tsx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import { useAuth } from '~/lib/hooks';
55
import { aiState } from '~/lib/stores/ai-state';
66
import { HistorySwitch } from '../sidebar/HistorySwitch';
77
import { ThemeSwitch } from '../ui/ThemeSwitch';
8-
import { ChatDescription } from './ChatDescription';
8+
import { ChatDescription } from './ChatDescription.client';
99
import { HeaderActionButtons } from './HeaderActionButtons';
1010
import { MinimalAvatarDropdown } from './MinimalAvatarDropdown';
1111

app/components/sidebar/Menu.client.tsx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ import { sidebarStore } from '~/lib/stores/sidebar';
1414
import { cubicEasingFn } from '~/utils/easings';
1515
import WithTooltip from '../ui/Tooltip';
1616
import { binDates } from './date-binning';
17-
import { HistoryItem } from './HistoryItem';
17+
import { HistoryItem } from './HistoryItem.client';
1818

1919
const menuVariants = {
2020
closed: {

app/lib/.server/auth.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ const config: LogtoConfig = {
9393
const originalLogto = makeLogtoRemix(config, { sessionStorage });
9494

9595
export function shouldEnforceAuth(): boolean {
96-
return process.env.LOGTO_ENABLE_DEV !== 'false';
96+
return process.env.LOGTO_ENABLE === 'true';
9797
}
9898

9999
function getMockDevUser(): MockUser {

app/lib/.server/llm/stream-enhancer.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import { convertToModelMessages, type LanguageModel, streamText, type UIMessage } from 'ai';
22
import { createScopedLogger } from '~/lib/.server/logger';
3-
import { DEFAULT_PROVIDER } from '~/utils/constants';
3+
import { DEFAULT_PROVIDER } from '~/lib/modules/constants';
44
import { stripIndents } from '~/utils/strip-indent';
55

66
const logger = createScopedLogger('stream-enhancer');

app/lib/modules/constants.ts

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import { LLMManager } from '~/lib/modules/llm/manager.server';
2+
3+
const llmManager = LLMManager.getInstance();
4+
5+
export const DEFAULT_MODEL = llmManager.getDefaultModel();
6+
export const MINOR_MODEL = llmManager.getMinorModel();
7+
export const DEFAULT_PROVIDER = llmManager.getDefaultProvider();
8+
9+
export const DEFAULT_MODEL_DETAILS = DEFAULT_PROVIDER.staticModels.find((m) => m.name === DEFAULT_MODEL);
10+
export const MINOR_MODEL_DETAILS = DEFAULT_PROVIDER.staticModels.find((m) => m.name === MINOR_MODEL);
11+
12+
export const getModel = (model: string) => {
13+
return DEFAULT_PROVIDER.getModelInstance({
14+
model,
15+
providerSettings: llmManager.getConfiguredProviderSettings(),
16+
});
17+
};

0 commit comments

Comments
 (0)