大改,未验证
This commit is contained in:
.env — 12 changed lines (11 additions, 1 deletion)
@@ -48,7 +48,7 @@ LLM_API_KEY=sk-Gce85QLROESeOWf3icd2mQnYHOrmMYojwVPQ0AubMjGQ5ZE2
 LLM_BASE_URL=https://gemini.jeason.online/v1
 
 # The specific model to use, e.g., "qwen-plus-latest", "gpt-3.5-turbo", "claude-3-sonnet-20240229"
-LLM_MODEL=gemini-2.5-flash
+LLM_MODEL=mimo-v2-flash
 
 # The temperature for the model's responses (0.0 to 2.0).
 LLM_TEMPERATURE=0.7
@@ -123,3 +123,13 @@ REDIS_DEFAULT_TTL=3600
 
 # Enable Redis cache (set to False to disable caching)
 REDIS_ENABLED=True
+
+# ============================================================================
+# EMBEDDING CONFIGURATION (知识库向量检索 - 本地模型)
+# ============================================================================
+# 暂时禁用,等有合适的 embedding API 或服务器资源时再启用
+EMBEDDING_ENABLED=False
+EMBEDDING_MODEL=BAAI/bge-small-zh-v1.5
+EMBEDDING_DIMENSION=512
+EMBEDDING_SIMILARITY_THRESHOLD=0.5
+
Reference in New Issue
Block a user