docs: update README and CLAUDE.md to v2.2.0

- Added documentation for audit tracking (IP address, invocation method).
- Updated database model descriptions for enhanced WorkOrder and Conversation fields.
- Documented the new UnifiedConfig system.
- Reflected enhanced logging transparency for knowledge base parsing.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
zhaojie
2026-02-11 00:08:09 +08:00
parent 2026007045
commit c3560b43fd
218 changed files with 3354 additions and 5096 deletions

View File

@@ -12,12 +12,14 @@ from typing import Dict, Any, Optional, List
from abc import ABC, abstractmethod
from dataclasses import dataclass
from src.config.unified_config import get_config
logger = logging.getLogger(__name__)
@dataclass
class LLMConfig:
# NOTE(review): diff fragment — the class body likely continues past this
# hunk (e.g. temperature/max_tokens fields are not visible here).
"""Configuration for one LLM provider connection."""
# Diff pair: pre- and post-change versions of the same `provider` field;
# the commit removed the inline example-provider comment.
provider: str # openai, anthropic, local, etc.
provider: str
# API key used to authenticate against the provider.
api_key: str
# Optional endpoint override — presumably an alternate/self-hosted API base; confirm with callers.
base_url: Optional[str] = None
# Default model name unless the caller overrides it.
model: str = "gpt-3.5-turbo"
@@ -60,8 +62,8 @@ class OpenAIClient(BaseLLMClient):
async def generate(self, prompt: str, **kwargs) -> str:
"""Generate text for *prompt* via the chat-completions API (diff fragment)."""
if not self.client:
# Diff pair: the old code fell back to a canned simulated response when
# no client was initialized; the new code fails fast with ImportError.
return self._simulate_response(prompt)
raise ImportError("OpenAI client not initialized. Please install the 'openai' package.")
try:
response = await self.client.chat.completions.create(
model=self.config.model,
# NOTE(review): the lines between the two hunks (the remaining keyword
# arguments of `create`, e.g. the messages payload) are not shown here.
@@ -72,13 +74,13 @@ class OpenAIClient(BaseLLMClient):
return response.choices[0].message.content
except Exception as e:
logger.error(f"OpenAI API调用失败: {e}")
# Diff pair: old silent fallback-to-simulation vs. new re-raise, so API
# failures now propagate to the caller after being logged.
return self._simulate_response(prompt)
raise e
async def chat(self, messages: List[Dict[str, str]], **kwargs) -> str:
"""Chat-style generation over a list of role/content message dicts (diff fragment)."""
if not self.client:
# Diff pair: old simulated-chat fallback vs. new fail-fast ImportError
# when the underlying SDK client was never initialized.
return self._simulate_chat(messages)
raise ImportError("OpenAI client not initialized. Please install the 'openai' package.")
try:
response = await self.client.chat.completions.create(
model=self.config.model,
# NOTE(review): interior of the `create` call (messages payload and any
# sampling parameters) falls between the hunks and is not visible here.
@@ -89,7 +91,7 @@ class OpenAIClient(BaseLLMClient):
return response.choices[0].message.content
except Exception as e:
logger.error(f"OpenAI Chat API调用失败: {e}")
# Diff pair: old fallback-to-simulation vs. new re-raise of the error.
return self._simulate_chat(messages)
raise e
def _simulate_response(self, prompt: str) -> str:
"""模拟响应"""
@@ -194,30 +196,39 @@ class LocalLLMClient(BaseLLMClient):
class LLMClientFactory:
    """Factory mapping an ``LLMConfig.provider`` string to a concrete client.

    Reconstructed post-change implementation of this diff hunk: the commit
    hoisted the lowercased provider into a local variable and routed "qwen"
    through the OpenAI-compatible client. The pre-change duplicate branch
    lines and the diff's lost indentation are resolved here.
    """

    @staticmethod
    def create_client(config: LLMConfig) -> BaseLLMClient:
        """Create the LLM client matching ``config.provider``.

        Args:
            config: Provider configuration; only ``provider`` is inspected
                here, the rest is passed through to the client.

        Returns:
            A concrete ``BaseLLMClient`` implementation.

        Raises:
            ValueError: if the provider is not openai/qwen/anthropic/local.
        """
        # Case-insensitive match; lowercase once instead of per-branch.
        provider = config.provider.lower()
        # qwen exposes an OpenAI-compatible API, so it reuses OpenAIClient.
        if provider in ("openai", "qwen"):
            return OpenAIClient(config)
        if provider == "anthropic":
            return AnthropicClient(config)
        if provider == "local":
            return LocalLLMClient(config)
        raise ValueError(f"不支持的LLM提供商: {config.provider}")
class LLMManager:
"""LLM管理器"""
def __init__(self, config: LLMConfig):
self.config = config
self.client = LLMClientFactory.create_client(config)
def __init__(self, config=None):
    """Initialize the manager, falling back to the unified config system.

    Args:
        config: Optional ``LLMConfig``. When ``None``, the configuration is
            pulled from ``get_config().llm`` (the UnifiedConfig system).
    """
    # Test `is not None` rather than truthiness so an explicitly supplied
    # (but falsy-evaluating) config object is never silently replaced by
    # the global one.
    if config is not None:
        self.config = config
    else:
        # No config provided — fetch it from the unified config system.
        self.config = get_config().llm
    self.client = LLMClientFactory.create_client(self.config)
    # Simple in-process usage counters.
    self.usage_stats = {
        "total_requests": 0,
        "total_tokens": 0,
        "error_count": 0
    }
async def generate(self, prompt: str, **kwargs) -> str:
"""生成文本"""