Merge branch 'develop' into fix/memory-enduser-config

Ke Sun
2026-01-29 17:49:36 +08:00
41 changed files with 1788 additions and 561 deletions

View File

@@ -7,10 +7,11 @@ Routes:
GET /memory/config/emotion - 获取情绪引擎配置
POST /memory/config/emotion - 更新情绪引擎配置
"""
import uuid
from fastapi import APIRouter, Depends, Query, HTTPException, status
from pydantic import BaseModel, Field
from typing import Optional
from typing import Optional, Union
from sqlalchemy.orm import Session
from uuid import UUID
@@ -38,7 +39,7 @@ class EmotionConfigQuery(BaseModel):
class EmotionConfigUpdate(BaseModel):
"""情绪配置更新请求模型"""
config_id: UUID = Field(..., description="配置ID")
config_id: Union[uuid.UUID, int, str] = Field(..., description="配置ID")
emotion_enabled: bool = Field(..., description="是否启用情绪提取")
emotion_model_id: Optional[str] = Field(None, description="情绪分析专用模型ID")
emotion_extract_keywords: bool = Field(..., description="是否提取情绪关键词")
@@ -159,6 +160,7 @@ def update_emotion_config(
}
}
"""
config.config_id = resolve_config_id(config.config_id, db)
try:
api_logger.info(
f"用户 {current_user.username} 请求更新情绪配置",

View File

@@ -45,6 +45,7 @@ async def save_reflection_config(
"""Save reflection configuration to data_comfig table"""
try:
config_id = request.config_id
config_id = resolve_config_id(config_id, db)
if not config_id:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
@@ -164,6 +165,7 @@ async def start_reflection_configs(
db: Session = Depends(get_db),
) -> dict:
"""通过config_id查询memory_config表中的反思配置信息"""
config_id = resolve_config_id(config_id, db)
try:
api_logger.info(f"用户 {current_user.username} 查询反思配置config_id: {config_id}")

View File

@@ -33,6 +33,7 @@ from app.services.memory_storage_service import (
search_entity,
search_statement,
)
from app.utils.config_utils import resolve_config_id
# Get API logger
api_logger = get_api_logger()
@@ -81,7 +82,6 @@ def create_config(
db: Session = Depends(get_db),
) -> dict:
workspace_id = current_user.current_workspace_id
# 检查用户是否已选择工作空间
if workspace_id is None:
api_logger.warning(f"用户 {current_user.username} 尝试创建配置但未选择工作空间")
@@ -101,7 +101,7 @@ def create_config(
@router.delete("/delete_config", response_model=ApiResponse) # 删除数据库中的内容(按配置名称)
def delete_config(
config_id: UUID,
config_id: UUID | int,
force: bool = Query(False, description="是否强制删除(即使有终端用户正在使用)"),
current_user: User = Depends(get_current_user),
db: Session = Depends(get_db),
@@ -117,7 +117,7 @@ def delete_config(
force: 设置为 true 可强制删除(即使有终端用户正在使用)
"""
workspace_id = current_user.current_workspace_id
config_id = resolve_config_id(config_id, db)
# 检查用户是否已选择工作空间
if workspace_id is None:
api_logger.warning(f"用户 {current_user.username} 尝试删除配置但未选择工作空间")
@@ -180,7 +180,7 @@ def update_config(
db: Session = Depends(get_db),
) -> dict:
workspace_id = current_user.current_workspace_id
payload.config_id = resolve_config_id(payload.config_id, db)
# 检查用户是否已选择工作空间
if workspace_id is None:
api_logger.warning(f"用户 {current_user.username} 尝试更新配置但未选择工作空间")
@@ -203,7 +203,7 @@ def update_config_extracted(
db: Session = Depends(get_db),
) -> dict:
workspace_id = current_user.current_workspace_id
payload.config_id = resolve_config_id(payload.config_id, db)
# 检查用户是否已选择工作空间
if workspace_id is None:
api_logger.warning(f"用户 {current_user.username} 尝试更新提取配置但未选择工作空间")
@@ -230,7 +230,7 @@ def read_config_extracted(
db: Session = Depends(get_db),
) -> dict:
workspace_id = current_user.current_workspace_id
config_id = resolve_config_id(config_id, db)
# 检查用户是否已选择工作空间
if workspace_id is None:
api_logger.warning(f"用户 {current_user.username} 尝试读取提取配置但未选择工作空间")
@@ -278,6 +278,7 @@ async def pilot_run(
f"Pilot run requested: config_id={payload.config_id}, "
f"dialogue_text_length={len(payload.dialogue_text)}"
)
payload.config_id = resolve_config_id(payload.config_id, db)
svc = DataConfigService(db)
return StreamingResponse(
svc.pilot_run_stream(payload),
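Every endpoint in this router now calls resolve_config_id on entry. As a purely illustrative alternative (not what this commit does), the same normalization could be centralized in a FastAPI dependency so the handlers only ever see the canonical UUID; get_db and resolve_config_id are the ones used elsewhere in this diff, the dependency name is hypothetical.

from uuid import UUID
from fastapi import Depends
from sqlalchemy.orm import Session
from app.db import get_db
from app.utils.config_utils import resolve_config_id

def resolved_config_id(config_id: UUID | int | str, db: Session = Depends(get_db)) -> UUID:
    # Reusable dependency: the raw parameter may be a UUID, an int or a legacy string;
    # handlers that depend on this always receive the canonical UUID.
    return resolve_config_id(config_id, db)

# Hypothetical usage:
# @router.get("/read_config_extracted", response_model=ApiResponse)
# def read_config_extracted(config_id: UUID = Depends(resolved_config_id), ...): ...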

View File

@@ -7,7 +7,7 @@ from app.core.error_codes import BizCode
from app.core.exceptions import BusinessException
from app.db import get_db
from app.dependencies import get_current_user
from app.models.models_model import ModelProvider, ModelType
from app.models.models_model import ModelProvider, ModelType, LoadBalanceStrategy
from app.models.user_model import User
from app.repositories.model_repository import ModelConfigRepository
from app.schemas import model_schema
@@ -33,6 +33,10 @@ def get_model_types():
def get_model_providers():
return success(msg="获取模型提供商成功", data=list(ModelProvider))
@router.get("/strategy", response_model=ApiResponse)
def get_model_strategies():
return success(msg="获取模型策略成功", data=list(LoadBalanceStrategy))
@router.get("", response_model=ApiResponse)
def get_model_list(
@@ -91,7 +95,7 @@ def get_model_list(
@router.get("/new", response_model=ApiResponse)
def get_model_list(
def get_model_list_new(
type: Optional[list[str]] = Query(None, description="模型类型筛选(支持多个,如 ?type=LLM 或 ?type=LLM,EMBEDDING)"),
provider: Optional[model_schema.ModelProvider] = Query(None, description="提供商筛选(基于ModelConfig)"),
is_active: Optional[bool] = Query(None, description="激活状态筛选"),
@@ -198,6 +202,10 @@ def update_model_base(
):
"""更新基础模型"""
# 不允许更改type类型
if data.type is not None or data.provider is not None:
raise BusinessException("不允许更改模型类型和供应商", BizCode.INVALID_PARAMETER)
result = ModelBaseService.update_model_base(db=db, model_base_id=model_base_id, data=data)
return success(data=model_schema.ModelBase.model_validate(result), msg="基础模型更新成功")
@@ -318,6 +326,8 @@ async def update_composite_model(
api_logger.info(f"更新组合模型请求: model_id={model_id}, 用户: {current_user.username}")
try:
if model_data.type is not None:
raise BusinessException("不允许更改模型类型和供应商", BizCode.INVALID_PARAMETER)
result_orm = await ModelConfigService.update_composite_model(db=db, model_id=model_id, model_data=model_data, tenant_id=current_user.tenant_id)
api_logger.info(f"组合模型更新成功: {result_orm.name} (ID: {model_id})")
@@ -460,8 +470,8 @@ async def create_model_api_key_by_provider(
created_keys = await ModelApiKeyService.create_api_key_by_provider(db=db, data=create_data)
api_logger.info(f"API Key创建成功: 关联{len(created_keys)}个模型")
result_list = [model_schema.ModelApiKey.model_validate(key) for key in created_keys]
return success(data=result_list, msg=f"成功为 {len(created_keys)} 个模型创建API Key")
# result_list = [model_schema.ModelApiKey.model_validate(key) for key in created_keys]
return success(data=f"成功为 {len(created_keys)} 个模型创建API Key", msg=f"成功为 {len(created_keys)} 个模型创建API Key")
except Exception as e:
api_logger.error(f"创建API Key失败: {str(e)}")
raise
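The new GET /strategy endpoint simply returns list(LoadBalanceStrategy). Because LoadBalanceStrategy is a StrEnum (see models_model.py later in this diff), its members serialize as plain strings; a small self-contained sketch of what the client receives, with the member set taken from this diff and therefore indicative only:

from enum import StrEnum

class LoadBalanceStrategy(StrEnum):
    ROUND_ROBIN = "round_robin"
    RANDOM = "random"
    NONE = "none"

print([s.value for s in LoadBalanceStrategy])   # ['round_robin', 'random', 'none']
# FastAPI/Pydantic serialize StrEnum members by value, so the /strategy response data
# is just this list of strings.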

View File

@@ -235,11 +235,11 @@ async def chat(
message=payload.message,
conversation_id=conversation.id, # 使用已创建的会话 ID
user_id=new_end_user.id, # 转换为字符串
user_id=end_user_id, # 转换为字符串
variables=payload.variables,
config=config,
web_search=web_search,
memory=payload.memory,
memory=memory,
storage_type=storage_type,
user_rag_memory_id=user_rag_memory_id,
app_id=app.id,
@@ -268,11 +268,11 @@ async def chat(
message=payload.message,
conversation_id=conversation.id, # 使用已创建的会话 ID
user_id=new_end_user.id, # 转换为字符串
user_id=end_user_id, # 转换为字符串
variables=payload.variables,
config=config,
web_search=web_search,
memory=payload.memory,
memory=memory,
storage_type=storage_type,
user_rag_memory_id=user_rag_memory_id,
app_id=app.id,

View File

@@ -0,0 +1 @@
"""模型配置脚本模块"""

View File

@@ -0,0 +1,174 @@
provider: bedrock
enabled: true
models:
- name: ai21
type: llm
provider: bedrock
description: AI21 Labs大语言模型completion生成模式256000上下文窗口
is_deprecated: false
is_official: true
tags:
- 大语言模型
logo: bedrock
- name: amazon nova
type: llm
provider: bedrock
description: Amazon Nova大语言模型支持智能体思考、工具调用、流式工具调用、视觉能力300000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- stream-tool-call
- vision
logo: bedrock
- name: anthropic claude
type: llm
provider: bedrock
description: Anthropic Claude大语言模型支持智能体思考、视觉能力、工具调用、流式工具调用、文档处理200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
logo: bedrock
- name: cohere
type: llm
provider: bedrock
description: Cohere大语言模型支持智能体思考、工具调用、流式工具调用128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- stream-tool-call
logo: bedrock
- name: deepseek
type: llm
provider: bedrock
description: DeepSeek大语言模型支持智能体思考、视觉能力、工具调用、流式工具调用32768上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- vision
- tool-call
- stream-tool-call
logo: bedrock
- name: meta
type: llm
provider: bedrock
description: Meta Llama大语言模型支持智能体思考、工具调用128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
logo: bedrock
- name: mistral
type: llm
provider: bedrock
description: Mistral AI大语言模型支持智能体思考、工具调用32000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
logo: bedrock
- name: openai
type: llm
provider: bedrock
description: OpenAI大语言模型支持智能体思考、工具调用、流式工具调用32768上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- stream-tool-call
logo: bedrock
- name: qwen
type: llm
provider: bedrock
description: Qwen大语言模型支持智能体思考、工具调用、流式工具调用32768上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- stream-tool-call
logo: bedrock
- name: amazon.rerank-v1:0
type: rerank
provider: bedrock
description: amazon.rerank-v1:0重排序模型5120上下文窗口
is_deprecated: false
is_official: true
tags:
- 重排序模型
logo: bedrock
- name: cohere.rerank-v3-5:0
type: rerank
provider: bedrock
description: cohere.rerank-v3-5:0重排序模型5120上下文窗口
is_deprecated: false
is_official: true
tags:
- 重排序模型
logo: bedrock
- name: amazon.nova-2-multimodal-embeddings-v1:0
type: embedding
provider: bedrock
description: amazon.nova-2-multimodal-embeddings-v1:0文本嵌入模型支持视觉能力8192上下文窗口
is_deprecated: false
is_official: true
tags:
- 文本嵌入模型
- vision
logo: bedrock
- name: amazon.titan-embed-text-v1
type: embedding
provider: bedrock
description: amazon.titan-embed-text-v1文本嵌入模型8192上下文窗口
is_deprecated: false
is_official: true
tags:
- 文本嵌入模型
logo: bedrock
- name: amazon.titan-embed-text-v2:0
type: embedding
provider: bedrock
description: amazon.titan-embed-text-v2:0文本嵌入模型8192上下文窗口
is_deprecated: false
is_official: true
tags:
- 文本嵌入模型
logo: bedrock
- name: cohere.embed-english-v3
type: embedding
provider: bedrock
description: Cohere Embed 3 English文本嵌入模型512上下文窗口
is_deprecated: false
is_official: true
tags:
- 文本嵌入模型
logo: bedrock
- name: cohere.embed-multilingual-v3
type: embedding
provider: bedrock
description: Cohere Embed 3 Multilingual文本嵌入模型512上下文窗口
is_deprecated: false
is_official: true
tags:
- 文本嵌入模型
logo: bedrock
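This file (and the dashscope/openai files below) share one shape that the loader in app/core/models/scripts/loader.py consumes: a provider key, an enabled switch, and a models list whose entries map onto ModelBase columns. A hedged Python sketch of how one entry becomes a row (the loader itself does the equivalent ModelBase(**entry) construction; the sample entry values are taken from this file):

import yaml
from app.models.models_model import ModelBase

entry = yaml.safe_load("""
name: anthropic claude
type: llm
provider: bedrock
description: Anthropic Claude大语言模型
is_deprecated: false
is_official: true
tags: [大语言模型, vision]
logo: bedrock
""")
model = ModelBase(**entry)   # keys must line up with ModelBase column names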

View File

@@ -0,0 +1,820 @@
provider: dashscope
enabled: true
models:
- name: deepseek-r1-distill-qwen-14b
type: llm
provider: dashscope
description: DeepSeek-R1-Distill-Qwen-14B大语言模型支持智能体思考32000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
logo: dashscope
- name: deepseek-r1-distill-qwen-32b
type: llm
provider: dashscope
description: DeepSeek-R1-Distill-Qwen-32B大语言模型支持智能体思考32000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
logo: dashscope
- name: deepseek-r1
type: llm
provider: dashscope
description: DeepSeek-R1大语言模型支持智能体思考131072超大上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
logo: dashscope
- name: deepseek-v3.1
type: llm
provider: dashscope
description: DeepSeek-V3.1大语言模型支持智能体思考131072超大上下文窗口对话模式支持丰富生成参数调节
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
logo: dashscope
- name: deepseek-v3.2-exp
type: llm
provider: dashscope
description: DeepSeek-V3.2-exp实验版大语言模型支持智能体思考131072超大上下文窗口对话模式支持丰富生成参数调节
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
logo: dashscope
- name: deepseek-v3.2
type: llm
provider: dashscope
description: DeepSeek-V3.2大语言模型支持智能体思考131072超大上下文窗口对话模式支持丰富生成参数调节
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
logo: dashscope
- name: deepseek-v3
type: llm
provider: dashscope
description: DeepSeek-V3大语言模型支持智能体思考64000上下文窗口对话模式支持文本与JSON格式输出
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
logo: dashscope
- name: farui-plus
type: llm
provider: dashscope
description: farui-plus大语言模型支持多工具调用、智能体思考、流式工具调用12288上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: glm-4.7
type: llm
provider: dashscope
description: GLM-4.7大语言模型支持多工具调用、智能体思考、流式工具调用202752超大上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qvq-max-latest
type: llm
provider: dashscope
description: qvq-max-latest大语言模型支持视觉、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- vision
- agent-thought
- stream-tool-call
logo: dashscope
- name: qvq-max
type: llm
provider: dashscope
description: qvq-max大语言模型支持视觉、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- vision
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-coder-turbo-0919
type: llm
provider: dashscope
description: qwen-coder-turbo-0919代码专用大语言模型支持智能体思考131072上下文窗口对话模式已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- 代码模型
- agent-thought
logo: dashscope
- name: qwen-max-latest
type: llm
provider: dashscope
description: qwen-max-latest大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式支持联网搜索
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-max-longcontext
type: llm
provider: dashscope
description: qwen-max-longcontext长上下文大语言模型支持多工具调用、智能体思考、流式工具调用32000上下文窗口对话模式已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-max
type: llm
provider: dashscope
description: qwen-max大语言模型支持多工具调用、智能体思考、流式工具调用32768上下文窗口对话模式支持联网搜索
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-mt-plus
type: llm
provider: dashscope
description: qwen-mt-plus多语言翻译大语言模型支持智能体思考16384上下文窗口对话模式支持多语种互译与领域翻译适配
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 翻译模型
- agent-thought
logo: dashscope
- name: qwen-mt-turbo
type: llm
provider: dashscope
description: qwen-mt-turbo轻量化多语言翻译大语言模型支持智能体思考16384上下文窗口对话模式支持多语种互译与领域翻译适配
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 翻译模型
- agent-thought
logo: dashscope
- name: qwen-plus-0112
type: llm
provider: dashscope
description: qwen-plus-0112大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式支持联网搜索已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-plus-0125
type: llm
provider: dashscope
description: qwen-plus-0125大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式支持联网搜索已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-plus-0723
type: llm
provider: dashscope
description: qwen-plus-0723大语言模型支持多工具调用、智能体思考、流式工具调用32000上下文窗口对话模式支持联网搜索已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-plus-0806
type: llm
provider: dashscope
description: qwen-plus-0806大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式支持联网搜索已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-plus-0919
type: llm
provider: dashscope
description: qwen-plus-0919大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式支持联网搜索已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-plus-1125
type: llm
provider: dashscope
description: qwen-plus-1125大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式支持联网搜索已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-plus-1127
type: llm
provider: dashscope
description: qwen-plus-1127大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式支持联网搜索已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-plus-1220
type: llm
provider: dashscope
description: qwen-plus-1220大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen-vl-max
type: llm
provider: dashscope
description: qwen-vl-max多模态大模型支持视觉理解、智能体思考、视频理解131072上下文窗口对话模式未废弃
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwen-vl-plus-0809
type: llm
provider: dashscope
description: qwen-vl-plus-0809多模态大模型支持视觉理解、智能体思考、视频理解32768上下文窗口对话模式已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwen-vl-plus-2025-01-02
type: llm
provider: dashscope
description: qwen-vl-plus-2025-01-02多模态大模型支持视觉理解、智能体思考、视频理解32768上下文窗口对话模式未废弃
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwen-vl-plus-2025-01-25
type: llm
provider: dashscope
description: qwen-vl-plus-2025-01-25多模态大模型支持视觉理解、智能体思考、视频理解131072上下文窗口对话模式未废弃
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwen-vl-plus-latest
type: llm
provider: dashscope
description: qwen-vl-plus-latest多模态大模型支持视觉理解、智能体思考、视频理解131072上下文窗口对话模式未废弃
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwen-vl-plus
type: llm
provider: dashscope
description: qwen-vl-plus多模态大模型支持视觉理解、智能体思考、视频理解131072上下文窗口对话模式未废弃
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwen2.5-0.5b-instruct
type: llm
provider: dashscope
description: qwen2.5-0.5b-instruct大语言模型支持多工具调用、智能体思考、流式工具调用32768上下文窗口对话模式未废弃
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-14b
type: llm
provider: dashscope
description: qwen3-14b大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-235b-a22b-instruct-2507
type: llm
provider: dashscope
description: qwen3-235b-a22b-instruct-2507大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-235b-a22b-thinking-2507
type: llm
provider: dashscope
description: qwen3-235b-a22b-thinking-2507大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-235b-a22b
type: llm
provider: dashscope
description: qwen3-235b-a22b大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-30b-a3b-instruct-2507
type: llm
provider: dashscope
description: qwen3-30b-a3b-instruct-2507大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-30b-a3b
type: llm
provider: dashscope
description: qwen3-30b-a3b大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-32b
type: llm
provider: dashscope
description: qwen3-32b大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-4b
type: llm
provider: dashscope
description: qwen3-4b大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-8b
type: llm
provider: dashscope
description: qwen3-8b大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-coder-30b-a3b-instruct
type: llm
provider: dashscope
description: qwen3-coder-30b-a3b-instruct大语言模型支持智能体思考262144上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 代码模型
- agent-thought
logo: dashscope
- name: qwen3-coder-480b-a35b-instruct
type: llm
provider: dashscope
description: qwen3-coder-480b-a35b-instruct大语言模型支持智能体思考262144上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 代码模型
- agent-thought
logo: dashscope
- name: qwen3-coder-plus-2025-09-23
type: llm
provider: dashscope
description: qwen3-coder-plus-2025-09-23大语言模型支持智能体思考1000000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 代码模型
- agent-thought
logo: dashscope
- name: qwen3-coder-plus
type: llm
provider: dashscope
description: qwen3-coder-plus大语言模型支持智能体思考1000000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 代码模型
- agent-thought
logo: dashscope
- name: qwen3-max-2025-09-23
type: llm
provider: dashscope
description: qwen3-max-2025-09-23大语言模型支持多工具调用、智能体思考、流式工具调用262144上下文窗口对话模式支持联网搜索
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
- 联网搜索
logo: dashscope
- name: qwen3-max-2026-01-23
type: llm
provider: dashscope
description: qwen3-max-2026-01-23大语言模型支持多工具调用、智能体思考、流式工具调用262144上下文窗口对话模式支持联网搜索
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
- 联网搜索
logo: dashscope
- name: qwen3-max-preview
type: llm
provider: dashscope
description: qwen3-max-preview大语言模型支持多工具调用、智能体思考、流式工具调用262144上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-max
type: llm
provider: dashscope
description: qwen3-max大语言模型支持多工具调用、智能体思考、流式工具调用262144上下文窗口对话模式支持联网搜索
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
- 联网搜索
logo: dashscope
- name: qwen3-next-80b-a3b-instruct
type: llm
provider: dashscope
description: qwen3-next-80b-a3b-instruct大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-next-80b-a3b-thinking
type: llm
provider: dashscope
description: qwen3-next-80b-a3b-thinking大语言模型支持多工具调用、智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwen3-omni-flash-2025-12-01
type: llm
provider: dashscope
description: qwen3-omni-flash-2025-12-01多模态大语言模型支持视觉、智能体思考、视频、音频能力65536上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
- audio
logo: dashscope
- name: qwen3-vl-235b-a22b-instruct
type: llm
provider: dashscope
description: qwen3-vl-235b-a22b-instruct多模态大语言模型支持多工具调用、智能体思考、流式工具调用、视觉、视频能力131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
- video
logo: dashscope
- name: qwen3-vl-235b-a22b-thinking
type: llm
provider: dashscope
description: qwen3-vl-235b-a22b-thinking多模态大语言模型支持多工具调用、智能体思考、流式工具调用、视觉、视频能力131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
- video
logo: dashscope
- name: qwen3-vl-30b-a3b-instruct
type: llm
provider: dashscope
description: qwen3-vl-30b-a3b-instruct多模态大语言模型支持多工具调用、智能体思考、流式工具调用、视觉、视频能力131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
- video
logo: dashscope
- name: qwen3-vl-30b-a3b-thinking
type: llm
provider: dashscope
description: qwen3-vl-30b-a3b-thinking多模态大语言模型支持多工具调用、智能体思考、流式工具调用、视觉、视频能力131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
- video
logo: dashscope
- name: qwen3-vl-flash
type: llm
provider: dashscope
description: qwen3-vl-flash多模态大语言模型支持多工具调用、智能体思考、流式工具调用、视觉、视频能力131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
- video
logo: dashscope
- name: qwen3-vl-plus-2025-09-23
type: llm
provider: dashscope
description: qwen3-vl-plus-2025-09-23多模态大语言模型支持视觉、智能体思考、视频能力262144上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwen3-vl-plus
type: llm
provider: dashscope
description: qwen3-vl-plus多模态大语言模型支持视觉、智能体思考、视频能力262144上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- 多模态模型
- vision
- agent-thought
- video
logo: dashscope
- name: qwq-32b
type: llm
provider: dashscope
description: qwq-32b大语言模型支持智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwq-plus-0305
type: llm
provider: dashscope
description: qwq-plus-0305大语言模型支持智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- stream-tool-call
logo: dashscope
- name: qwq-plus
type: llm
provider: dashscope
description: qwq-plus大语言模型支持智能体思考、流式工具调用131072上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- stream-tool-call
logo: dashscope
- name: gte-rerank-v2
type: rerank
provider: dashscope
description: gte-rerank-v2重排序模型4000上下文窗口
is_deprecated: false
is_official: true
tags:
- 重排序模型
logo: dashscope
- name: gte-rerank
type: rerank
provider: dashscope
description: gte-rerank重排序模型4000上下文窗口
is_deprecated: false
is_official: true
tags:
- 重排序模型
logo: dashscope
- name: multimodal-embedding-v1
type: embedding
provider: dashscope
description: multimodal-embedding-v1多模态嵌入模型支持视觉能力8192上下文窗口最大分块数10
is_deprecated: false
is_official: true
tags:
- 嵌入模型
- 多模态模型
- vision
logo: dashscope
- name: text-embedding-v1
type: embedding
provider: dashscope
description: text-embedding-v1文本嵌入模型2048上下文窗口最大分块数25
is_deprecated: false
is_official: true
tags:
- 嵌入模型
- 文本嵌入
logo: dashscope
- name: text-embedding-v2
type: embedding
provider: dashscope
description: text-embedding-v2文本嵌入模型2048上下文窗口最大分块数25
is_deprecated: false
is_official: true
tags:
- 嵌入模型
- 文本嵌入
logo: dashscope
- name: text-embedding-v3
type: embedding
provider: dashscope
description: text-embedding-v3文本嵌入模型8192上下文窗口最大分块数10
is_deprecated: false
is_official: true
tags:
- 嵌入模型
- 文本嵌入
logo: dashscope
- name: text-embedding-v4
type: embedding
provider: dashscope
description: text-embedding-v4文本嵌入模型8192上下文窗口最大分块数10
is_deprecated: false
is_official: true
tags:
- 嵌入模型
- 文本嵌入
logo: dashscope

View File

@@ -0,0 +1,143 @@
"""模型配置加载器 - 用于将预定义模型批量导入到数据库"""
import os
from pathlib import Path
from typing import Callable
import yaml
from sqlalchemy.orm import Session
from app.models.models_model import ModelBase, ModelProvider
def _load_yaml_config(provider: ModelProvider) -> list[dict]:
"""从YAML文件加载指定供应商的模型配置"""
config_dir = Path(__file__).parent
config_file = config_dir / f"{provider.value}_models.yaml"
if not config_file.exists():
return []
with open(config_file, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f)
# 检查是否需要加载(默认为 true
if not data.get('enabled', True):
return []
return data.get('models', [])
def _disable_yaml_config(provider: ModelProvider) -> None:
"""将YAML文件的enabled标志设置为false"""
config_dir = Path(__file__).parent
config_file = config_dir / f"{provider.value}_models.yaml"
if not config_file.exists():
return
with open(config_file, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f)
data['enabled'] = False
with open(config_file, 'w', encoding='utf-8') as f:
yaml.dump(data, f, allow_unicode=True, sort_keys=False)
def load_models(db: Session, providers: list[str] | None = None, silent: bool = False) -> dict:
"""
加载模型配置到数据库
Args:
db: 数据库会话
providers: 要加载的供应商列表None表示加载所有
silent: 是否静默模式(不输出详细日志)
Returns:
dict: 加载结果统计 {"success": int, "skipped": int, "failed": int}
"""
result = {"success": 0, "skipped": 0, "failed": 0}
# 确定要加载的供应商
if providers:
target_providers = [ModelProvider(p) if isinstance(p, str) else p for p in providers]
else:
target_providers = [p for p in ModelProvider if p != ModelProvider.COMPOSITE]
for provider in target_providers:
# 从YAML文件加载模型配置
models = _load_yaml_config(provider)
if not models:
if not silent:
print(f"警告: 供应商 '{provider.value}' 暂无预定义模型")
continue
if not silent:
print(f"\n正在加载 {provider.value}{len(models)} 个模型...")
# provider_success = 0
for model_data in models:
try:
# 检查模型是否已存在
existing = db.query(ModelBase).filter(
ModelBase.name == model_data["name"],
ModelBase.provider == model_data["provider"]
).first()
if existing:
# 更新现有模型配置
for key, value in model_data.items():
setattr(existing, key, value)
db.commit()
if not silent:
print(f"更新成功: {model_data['name']}")
result["success"] += 1
# provider_success += 1
else:
# 创建新模型
model = ModelBase(**model_data)
db.add(model)
db.commit()
if not silent:
print(f"添加成功: {model_data['name']}")
result["success"] += 1
# provider_success += 1
except Exception as e:
db.rollback()
if not silent:
print(f"添加失败: {model_data['name']} - {str(e)}")
result["failed"] += 1
# 如果该供应商的模型全部加载成功将enabled设置为false
# if provider_success == len(models):
_disable_yaml_config(provider)
return result
def load_models_by_provider(db: Session, provider: str) -> dict:
"""
加载指定供应商的模型配置
Args:
db: 数据库会话
provider: 供应商名称字符串或ModelProvider枚举
Returns:
dict: 加载结果统计
"""
provider_enum = ModelProvider(provider) if isinstance(provider, str) else provider
return load_models(db, providers=[provider_enum])
def get_available_providers() -> list[str]:
"""获取所有可用的供应商列表从ModelProvider枚举获取排除COMPOSITE"""
return [p.value for p in ModelProvider if p != ModelProvider.COMPOSITE]
def get_models_by_provider(provider: str) -> list[dict]:
"""获取指定供应商的模型配置列表"""
provider_enum = ModelProvider(provider) if isinstance(provider, str) else provider
return _load_yaml_config(provider_enum)
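A hedged usage sketch for this loader, matching how main.py calls it later in this diff (get_db_context comes from app.db as shown there; the result keys are the ones defined above, and the counts are illustrative):

from app.db import get_db_context
from app.core.models.scripts.loader import load_models, load_models_by_provider

with get_db_context() as db:
    # Import every provider whose YAML file is still enabled
    result = load_models(db, silent=True)
    print(result)                       # e.g. {"success": 42, "skipped": 0, "failed": 0}

    # Or import a single provider
    load_models_by_provider(db, "openai")

# Note: _disable_yaml_config flips the file's enabled flag to false once a provider
# has been processed, so the import is effectively a one-shot on subsequent startups.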

View File

@@ -0,0 +1,294 @@
provider: openai
enabled: true
models:
- name: chatgpt-4o-latest
type: llm
provider: openai
description: chatgpt-4o-latest大语言模型支持多工具调用、智能体思考、流式工具调用、视觉能力128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
logo: openai
- name: gpt-3.5-turbo-0125
type: llm
provider: openai
description: gpt-3.5-turbo-0125大语言模型支持多工具调用、智能体思考、流式工具调用16385上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: openai
- name: gpt-3.5-turbo-1106
type: llm
provider: openai
description: gpt-3.5-turbo-1106大语言模型支持多工具调用、智能体思考、流式工具调用16385上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: openai
- name: gpt-3.5-turbo-16k
type: llm
provider: openai
description: gpt-3.5-turbo-16k大语言模型支持多工具调用、智能体思考、流式工具调用16385上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: openai
- name: gpt-3.5-turbo-instruct
type: llm
provider: openai
description: gpt-3.5-turbo-instruct大语言模型4096上下文窗口文本补全模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
logo: openai
- name: gpt-3.5-turbo
type: llm
provider: openai
description: gpt-3.5-turbo大语言模型支持多工具调用、智能体思考、流式工具调用16385上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: openai
- name: gpt-4-0125-preview
type: llm
provider: openai
description: gpt-4-0125-preview大语言模型支持多工具调用、智能体思考、流式工具调用128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: openai
- name: gpt-4-1106-preview
type: llm
provider: openai
description: gpt-4-1106-preview大语言模型支持多工具调用、智能体思考、流式工具调用128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: openai
- name: gpt-4-turbo-2024-04-09
type: llm
provider: openai
description: gpt-4-turbo-2024-04-09大语言模型支持多工具调用、智能体思考、流式工具调用、视觉能力128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
logo: openai
- name: gpt-4-turbo-preview
type: llm
provider: openai
description: gpt-4-turbo-preview大语言模型支持多工具调用、智能体思考、流式工具调用128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
logo: openai
- name: gpt-4-turbo
type: llm
provider: openai
description: gpt-4-turbo大语言模型支持多工具调用、智能体思考、流式工具调用、视觉能力128000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
logo: openai
- name: o1-preview
type: llm
provider: openai
description: o1-preview大语言模型支持智能体思考128000上下文窗口对话模式已废弃
is_deprecated: true
is_official: true
tags:
- 大语言模型
- agent-thought
logo: openai
- name: o1
type: llm
provider: openai
description: o1大语言模型支持多工具调用、智能体思考、流式工具调用、视觉能力、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
- structured-output
logo: openai
- name: o3-2025-04-16
type: llm
provider: openai
description: o3-2025-04-16大语言模型支持智能体思考、工具调用、视觉能力、流式工具调用、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- vision
- stream-tool-call
- structured-output
logo: openai
- name: o3-mini-2025-01-31
type: llm
provider: openai
description: o3-mini-2025-01-31大语言模型支持智能体思考、工具调用、流式工具调用、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- stream-tool-call
- structured-output
logo: openai
- name: o3-mini
type: llm
provider: openai
description: o3-mini大语言模型支持智能体思考、工具调用、流式工具调用、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- stream-tool-call
- structured-output
logo: openai
- name: o3-pro-2025-06-10
type: llm
provider: openai
description: o3-pro-2025-06-10大语言模型支持智能体思考、工具调用、视觉能力、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- vision
- structured-output
logo: openai
- name: o3-pro
type: llm
provider: openai
description: o3-pro大语言模型支持智能体思考、工具调用、视觉能力、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- vision
- structured-output
logo: openai
- name: o3
type: llm
provider: openai
description: o3大语言模型支持智能体思考、视觉能力、工具调用、流式工具调用、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- vision
- tool-call
- stream-tool-call
- structured-output
logo: openai
- name: o4-mini-2025-04-16
type: llm
provider: openai
description: o4-mini-2025-04-16大语言模型支持智能体思考、工具调用、视觉能力、流式工具调用、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- vision
- stream-tool-call
- structured-output
logo: openai
- name: o4-mini
type: llm
provider: openai
description: o4-mini大语言模型支持智能体思考、工具调用、视觉能力、流式工具调用、结构化输出200000上下文窗口对话模式
is_deprecated: false
is_official: true
tags:
- 大语言模型
- agent-thought
- tool-call
- vision
- stream-tool-call
- structured-output
logo: openai
- name: text-embedding-3-large
type: embedding
provider: openai
description: text-embedding-3-large文本向量模型8191上下文窗口最大分块数32
is_deprecated: false
is_official: true
tags:
- 文本向量模型
logo: openai
- name: text-embedding-3-small
type: embedding
provider: openai
description: text-embedding-3-small文本向量模型8191上下文窗口最大分块数32
is_deprecated: false
is_official: true
tags:
- 文本向量模型
logo: openai
- name: text-embedding-ada-002
type: embedding
provider: openai
description: text-embedding-ada-002文本向量模型8097上下文窗口最大分块数32
is_deprecated: false
is_official: true
tags:
- 文本向量模型
logo: openai

View File

@@ -16,6 +16,8 @@ from app.core.error_codes import BizCode, HTTP_MAPPING
from app.core.exceptions import BusinessException
from app.core.logging_config import LoggingConfig, get_logger
from app.core.response_utils import fail
from app.core.models.scripts.loader import load_models
from app.db import get_db_context
# Initialize logging system
LoggingConfig.setup_logging()
@@ -47,6 +49,15 @@ async def lifespan(app: FastAPI):
else:
logger.info("自动数据库升级已禁用 (DB_AUTO_UPGRADE=false)")
# 加载预定义模型
logger.info("开始加载预定义模型...")
try:
with get_db_context() as db:
result = load_models(db, silent=True)
logger.info(f"预定义模型加载完成: 成功{result['success']}个, 跳过{result['skipped']}个, 失败{result['failed']}")
except Exception as e:
logger.warning(f"加载预定义模型时出错: {str(e)}")
logger.info("应用程序启动完成")
yield
# 应用关闭事件

View File

@@ -5,6 +5,7 @@ from enum import StrEnum
from sqlalchemy import Column, String, Boolean, DateTime, Text, ForeignKey, Enum as SQLEnum, UniqueConstraint, Integer, ARRAY, Table
from sqlalchemy.dialects.postgresql import UUID, JSON
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from app.db import Base
@@ -23,6 +24,8 @@ class ModelType(StrEnum):
CHAT = "chat"
EMBEDDING = "embedding"
RERANK = "rerank"
# TTS = "tts"
# SPEECH2TEXT = "speech2text"
# IMAGE = "image"
# AUDIO = "audio"
# VISION = "vision"
@@ -48,8 +51,7 @@ class ModelProvider(StrEnum):
class LoadBalanceStrategy(StrEnum):
"""API Key负载均衡策略枚举"""
ROUND_ROBIN = "round_robin" # 轮询
WEIGHTED_ROUND_ROBIN = "weighted_round_robin" # 加权轮询
RANDOM = "random" # 随机
NONE = "none" #
# 多对多关联表
@@ -90,7 +92,8 @@ class ModelConfig(BaseModel):
# 状态管理
is_public = Column(Boolean, default=False, nullable=False, comment="是否公开")
load_balance_strategy = Column(String, nullable=True, comment="负载均衡策略")
load_balance_strategy = Column(String, nullable=True, comment="负载均衡策略", default=LoadBalanceStrategy.NONE,
server_default=LoadBalanceStrategy.NONE)
# 关联关系
model_base = relationship("ModelBase", back_populates="configs")
@@ -151,6 +154,7 @@ class ModelBase(Base):
is_official = Column(Boolean, default=True, comment="是否供应商官方模型(区分自定义)")
tags = Column(ARRAY(String), default=list, nullable=False, comment="模型标签(如['聊天', '创作']")
add_count = Column(Integer, default=0, nullable=False, comment="模型被用户添加的次数")
created_at = Column(DateTime, default=datetime.datetime.now, comment="创建时间", server_default=func.now())
# 关联关系
configs = relationship("ModelConfig", back_populates="model_base", cascade="all, delete-orphan")

View File

@@ -165,7 +165,7 @@ class ModelConfigRepository:
total = base_query.count()
# 分页查询
models = base_query.order_by(desc(ModelConfig.updated_at)).offset(
models = base_query.order_by(desc(ModelConfig.created_at)).offset(
(query.page - 1) * query.pagesize
).limit(query.pagesize).all()
@@ -234,7 +234,7 @@ class ModelConfigRepository:
# 获取总数
total = base_query.count()
query_results = base_query.order_by(desc(ModelConfig.updated_at)).all()
query_results = base_query.order_by(desc(ModelConfig.created_at)).all()
provider_groups: Dict[str, List[ModelConfig]] = {}
for model_config in query_results:
@@ -433,6 +433,7 @@ class ModelConfigRepository:
ModelConfig.is_public
),
ModelBase.provider == provider,
ModelConfig.is_active,
~ModelConfig.is_composite
)
).distinct().all()
@@ -621,7 +622,7 @@ class ModelBaseRepository:
if filters:
q = q.filter(and_(*filters))
return q.order_by(ModelBase.add_count.desc()).all()
return q.order_by(ModelBase.add_count.desc(), ModelBase.created_at.desc()).all()
@staticmethod
def create(db: Session, data: dict) -> 'ModelBase':
@@ -636,6 +637,17 @@ class ModelBaseRepository:
return None
for key, value in data.items():
setattr(model_base, key, value)
# 同步更新绑定的非组合模型配置
if any(k in data for k in ['name', 'description', 'logo']):
db.query(ModelConfig).filter(
ModelConfig.model_id == model_base_id,
ModelConfig.is_composite == False
).update({
k: v for k, v in data.items()
if k in ['name', 'description', 'logo']
}, synchronize_session=False)
return model_base
@staticmethod

View File

@@ -1,5 +1,7 @@
import uuid
from pydantic import BaseModel, Field
from typing import Optional
from typing import Optional, Union
from uuid import UUID
from enum import Enum
@@ -10,7 +12,7 @@ class OptimizationStrategy(str, Enum):
ACCURACY_FIRST = "accuracy_first"
BALANCED = "balanced"
class Memory_Reflection(BaseModel):
config_id: Optional[UUID] = None
config_id: Optional[Union[uuid.UUID, int, str]] = None
reflection_enabled: bool
reflection_period_in_hours: str
reflexion_range: Optional[str] = "partial"

View File

@@ -147,7 +147,7 @@ class ReflexionResultSchema(BaseModel):
# Composite key identifying a config row
class ConfigKey(BaseModel): # 配置参数键模型
model_config = ConfigDict(populate_by_name=True, extra="forbid")
config_id: Union[uuid.UUID, int] = Field(..., description="配置唯一标识UUID或int)")
config_id:Union[uuid.UUID, int, str] = Field(..., description="配置唯一标识UUID或int)")
user_id: str = Field("user_id", description="用户标识(字符串)")
apply_id: str = Field("apply_id", description="应用或场景标识(字符串)")
@@ -238,17 +238,17 @@ class ConfigParamsCreate(BaseModel): # 创建配置参数模型(仅 body
class ConfigParamsDelete(BaseModel): # 删除配置参数模型(请求体)
model_config = ConfigDict(populate_by_name=True, extra="forbid")
# config_name: str = Field("配置名称", description="配置名称(字符串)")
config_id: uuid.UUID = Field("配置ID", description="配置IDUUID")
config_id:Union[uuid.UUID, int, str] = Field(..., description="配置ID支持UUID、整数或字符串")
class ConfigUpdate(BaseModel): # 更新记忆萃取引擎配置参数时使用的模型
config_id: Optional[uuid.UUID] = None
config_id: Optional[Union[uuid.UUID, int, str]] = None
config_name: str = Field("配置名称", description="配置名称(字符串)")
config_desc: str = Field("配置描述", description="配置描述(字符串)")
class ConfigUpdateExtracted(BaseModel): # 更新记忆萃取引擎配置参数时使用的模型
config_id: Optional[uuid.UUID] = None
config_id: Optional[Union[uuid.UUID, int, str]] = None
llm_id: Optional[str] = Field(None, description="LLM模型配置ID")
embedding_id: Optional[str] = Field(None, description="嵌入模型配置ID")
rerank_id: Optional[str] = Field(None, description="重排序模型配置ID")
@@ -315,14 +315,14 @@ class ConfigUpdateExtracted(BaseModel): # 更新记忆萃取引擎配置参数
class ConfigUpdateForget(BaseModel): # 更新遗忘引擎配置参数时使用的模型
# 遗忘引擎配置参数更新模型
config_id: Optional[uuid.UUID] = None
config_id: Optional[Union[uuid.UUID, int, str]] = None
lambda_time: Optional[float] = Field(0.5, ge=0.0, le=1.0, description="最低保持度0-1 小数;默认 0.5")
lambda_mem: Optional[float] = Field(0.5, ge=0.0, le=1.0, description="遗忘率0-1 小数;默认 0.5")
offset: Optional[float] = Field(0.0, ge=0.0, le=1.0, description="偏移度0-1 小数;默认 0.0")
class ConfigPilotRun(BaseModel): # 试运行触发请求模型
config_id: uuid.UUID = Field(..., description="配置ID唯一")
config_id:Union[uuid.UUID, int, str] = Field(..., description="配置ID唯一支持UUID、整数或字符串")
dialogue_text: str = Field(..., description="前端传入的对话文本,格式如 '用户: ...\nAI: ...' 可多行,试运行必填")
model_config = ConfigDict(populate_by_name=True, extra="forbid")
@@ -330,7 +330,7 @@ class ConfigPilotRun(BaseModel): # 试运行触发请求模型
class ConfigFilter(BaseModel): # 查询配置参数时使用的模型
model_config = ConfigDict(populate_by_name=True, extra="forbid")
config_id: Optional[uuid.UUID] = None
config_id: Optional[Union[uuid.UUID, int, str]] = None
user_id: Optional[str] = None
apply_id: Optional[str] = None
@@ -406,7 +406,7 @@ class ForgettingConfigResponse(BaseModel):
"""遗忘引擎配置响应模型"""
model_config = ConfigDict(populate_by_name=True, extra="forbid")
config_id: uuid.UUID = Field(..., description="配置ID")
config_id: Union[uuid.UUID, int, str] = Field(..., description="配置ID支持UUID、整数或字符串")
decay_constant: float = Field(..., description="衰减常数 d")
lambda_time: float = Field(..., description="时间衰减参数")
lambda_mem: float = Field(..., description="记忆衰减参数")

View File

@@ -3,14 +3,12 @@ from typing import Optional, List, Dict, Any
import datetime
import uuid
from app.models.models_model import ModelProvider, ModelType
from app.models.models_model import ModelProvider, ModelType, LoadBalanceStrategy
from app.core.logging_config import get_business_logger
schema_logger = get_business_logger()
# ModelConfig Schemas
class ModelConfigBase(BaseModel):
"""模型配置基础Schema"""
@@ -22,6 +20,7 @@ class ModelConfigBase(BaseModel):
config: Optional[Dict[str, Any]] = Field({}, description="模型配置参数")
is_active: bool = Field(True, description="是否激活")
is_public: bool = Field(False, description="是否公开")
load_balance_strategy: Optional[str] = Field(LoadBalanceStrategy.NONE.value, description="负载均衡策略")
class ApiKeyCreateNested(BaseModel):
@@ -44,13 +43,14 @@ class ModelConfigCreate(ModelConfigBase):
class CompositeModelCreate(BaseModel):
"""创建组合模型Schema"""
name: str = Field(..., description="组合模型名称", max_length=255)
type: ModelType = Field(..., description="模型类型")
type: Optional[ModelType] = Field(None, description="模型类型")
logo: Optional[str] = Field(None, description="模型logo图片URL", max_length=255)
description: Optional[str] = Field(None, description="模型描述")
config: Optional[Dict[str, Any]] = Field({}, description="模型配置参数")
is_active: bool = Field(True, description="是否激活")
is_public: bool = Field(False, description="是否公开")
api_key_ids: List[uuid.UUID] = Field(..., description="绑定的API Key ID列表")
load_balance_strategy: Optional[str] = Field(default=LoadBalanceStrategy.NONE.value, description="负载均衡策略")
class ModelConfigUpdate(BaseModel):

View File

@@ -18,6 +18,7 @@ from app.repositories.neo4j.neo4j_connector import Neo4jConnector
from app.models.app_model import App
from app.models.app_release_model import AppRelease
from app.models.end_user_model import EndUser
from app.utils.config_utils import resolve_config_id
api_logger = get_api_logger()
@@ -88,38 +89,36 @@ class WorkspaceAppService:
for release in app_releases:
memory_content = self._extract_memory_content(release.config)
memory_content = resolve_config_id(memory_content, self.db)
if memory_content and memory_content in processed_configs:
continue
release_info = {
"app_id": str(release.app_id),
"config": memory_content
}
if memory_content:
processed_configs.add(memory_content)
memory_config_info = self._get_memory_config(memory_content)
if memory_config_info:
if not any(dc["config_id"] == memory_config_info["config_id"] for dc in app_info["memory_configs"]):
app_info["memory_configs"].append(memory_config_info)
app_info["releases"].append(release_info)
def _extract_memory_content(self, config: Any) -> str:
"""Extract memory_comtent from config"""
if not config or not isinstance(config, dict):
return None
memory_obj = config.get('memory')
if memory_obj and isinstance(memory_obj, dict):
return memory_obj.get('memory_content')
return None
def _get_memory_config(self, memory_content: str) -> Dict[str, Any]:
"""Retrieve memory_config information based on memory_content"""
try:
@@ -129,7 +128,7 @@ class WorkspaceAppService:
# memory_config_result = self.db.execute(text(memory_config_query), memory_config_params).fetchone()
# if memory_config_result is None:
# return None
if memory_config_result:
return {
"config_id": memory_config_result.config_id,
@@ -144,20 +143,22 @@ class WorkspaceAppService:
}
except Exception as e:
api_logger.warning(f"查询memory_config失败memory_content: {memory_content}, 错误: {str(e)}")
return None
def _process_end_users(self, app: App, app_info: Dict[str, Any]) -> None:
"""Processing end-user information for applications"""
end_users = self.db.query(EndUser).filter(EndUser.app_id == app.id).all()
for end_user in end_users:
end_user_info = {
"id": str(end_user.id),
"app_id": str(end_user.app_id)
}
app_info["end_users"].append(end_user_info)
api_logger.debug(app_info)
def get_end_user_reflection_time(self, end_user_id: str) -> Optional[Any]:
"""
Read the reflection time of end users
@@ -176,7 +177,7 @@ class WorkspaceAppService:
except Exception as e:
api_logger.error(f"读取用户反思时间失败end_user_id: {end_user_id}, 错误: {str(e)}")
return None
def update_end_user_reflection_time(self, end_user_id: str) -> bool:
"""
Update the reflection time of end users to the current time
@@ -189,7 +190,7 @@ class WorkspaceAppService:
"""
try:
from datetime import datetime
end_user = self.db.query(EndUser).filter(EndUser.id == end_user_id).first()
if end_user:
end_user.reflection_time = datetime.now()
@@ -207,7 +208,7 @@ class WorkspaceAppService:
class MemoryReflectionService:
"""Memory reflection service category"""
def __init__(self, db: Session = Depends(get_db)):
self.db = db
@@ -252,22 +253,22 @@ class MemoryReflectionService:
"end_user_id": end_user_id,
"config_data": config_data
}
async def start_reflection_from_data(self, config_data: Dict[str, Any], end_user_id: str) -> Dict[str, Any]:
"""
Starting Reflection from Configuration Data
Args:
config_data: Configure data dictionary, including reflective configuration information
end_user_id: end_user_id
Returns:
Reflect on the execution results
"""
try:
config_id = config_data.get("config_id")
api_logger.info(f"从配置数据启动反思config_id: {config_id}, end_user_id: {end_user_id}")
if not config_data.get("enable_self_reflexion", False):
return {
@@ -277,7 +278,7 @@ class MemoryReflectionService:
"end_user_id": end_user_id,
"config_data": config_data
}
config_data_id = config_data['config_id']
reflection_config = WorkspaceAppService(self.db)._get_memory_config(config_data_id)
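_extract_memory_content above only reads one level into the release config, so the shape it expects is roughly the following. This is a hedged reconstruction from the code in this diff; the surrounding keys of release.config are not shown here and the sample values are illustrative.

release_config = {
    "memory": {
        "memory_content": "123",   # legacy short id or UUID string of a memory config
        # ... other memory settings not used by this service
    },
    # ... other app release settings
}

memory_content = release_config.get("memory", {}).get("memory_content")
# resolve_config_id() then maps a legacy value like "123" onto the canonical config_id UUID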

View File

@@ -347,7 +347,9 @@ class ModelConfigService:
"is_public": model_data.is_public,
"is_composite": True
}
if "load_balance_strategy" in model_data.model_fields_set:
model_config_data["load_balance_strategy"] = model_data.load_balance_strategy
model = ModelConfigRepository.create(db, model_config_data)
db.flush()
@@ -380,7 +382,7 @@ class ModelConfigService:
for model_config in api_key.model_configs:
compatible_types = {ModelType.LLM, ModelType.CHAT}
config_type = model_config.type
request_type = model_data.type
request_type = existing_model.type
if not (config_type == request_type or
(config_type in compatible_types and request_type in compatible_types)):
@@ -391,12 +393,14 @@ class ModelConfigService:
# 更新基本信息
existing_model.name = model_data.name
existing_model.type = model_data.type
# existing_model.type = model_data.type
existing_model.logo = model_data.logo
existing_model.description = model_data.description
existing_model.config = model_data.config
existing_model.is_active = model_data.is_active
existing_model.is_public = model_data.is_public
if "load_balance_strategy" in model_data.model_fields_set:
existing_model.load_balance_strategy = model_data.load_balance_strategy
# 更新 API Keys 关联
existing_model.api_keys.clear()
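The `"load_balance_strategy" in model_data.model_fields_set` checks rely on a Pydantic v2 feature: model_fields_set contains only the fields the client actually sent, so "not provided" can be distinguished from "explicitly sent the default". A small self-contained illustration with hypothetical field values:

from typing import Optional
from pydantic import BaseModel

class CompositePatch(BaseModel):
    name: Optional[str] = None
    load_balance_strategy: Optional[str] = "none"

p1 = CompositePatch(name="composite-a")
p2 = CompositePatch(name="composite-a", load_balance_strategy="round_robin")

print("load_balance_strategy" in p1.model_fields_set)  # False -> keep the stored value
print("load_balance_strategy" in p2.model_fields_set)  # True  -> overwrite it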

View File

@@ -16,7 +16,7 @@ from app.models.prompt_optimizer_model import (
PromptOptimizerSession,
RoleType
)
from app.repositories.model_repository import ModelConfigRepository
from app.repositories.model_repository import ModelConfigRepository, ModelApiKeyRepository
from app.repositories.prompt_optimizer_repository import (
PromptOptimizerSessionRepository
)
@@ -168,7 +168,8 @@ class PromptOptimizerService:
logger.info(f"Prompt optimization started, user_id={user_id}, session_id={session_id}")
# Create LLM instance
api_config: ModelApiKey = model_config.api_keys[0]
api_keys = ModelApiKeyRepository.get_by_model_config(self.db, model_config.id)
api_config: ModelApiKey = api_keys[0] if api_keys else None
llm = RedBearLLM(RedBearModelConfig(
model_name=api_config.model_name,
provider=api_config.provider,
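The new lookup takes the first key returned by ModelApiKeyRepository.get_by_model_config and falls back to None, yet the lines that follow still dereference api_config. A guard along these lines would be needed if a model config can reach this point with no bound keys; this is a sketch only, and it assumes BusinessException/BizCode are importable in this service as they are elsewhere in this diff.

api_keys = ModelApiKeyRepository.get_by_model_config(self.db, model_config.id)
if not api_keys:
    # Fail fast instead of raising AttributeError on api_config.model_name below
    raise BusinessException("模型未绑定可用的API Key", BizCode.INVALID_PARAMETER)
api_config: ModelApiKey = api_keys[0]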

View File

@@ -21,8 +21,18 @@ def resolve_config_id(config_id: UUID | int, db: Session) -> UUID:
Raises:
ValueError: 当找不到对应的配置时
"""
from app.models.memory_config_model import MemoryConfig
if isinstance(config_id, UUID):
return config_id
if isinstance(config_id, str) and len(config_id)<=6:
memory_config = db.query(MemoryConfig).filter(
MemoryConfig.config_id_old == config_id
).first()
if not memory_config:
raise ValueError(f"未找到 config_id_old={config_id} 对应的配置")
return memory_config.config_id
if isinstance(config_id, int):
memory_config = db.query(MemoryConfig).filter(
MemoryConfig.config_id_old == config_id
).first()
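A short usage sketch for the extended helper, covering the three accepted input shapes; db is assumed to be an open SQLAlchemy Session, and the short-string branch applies to legacy ids of at most 6 characters, per the check above.

from uuid import uuid4
from app.utils.config_utils import resolve_config_id

cid = uuid4()
assert resolve_config_id(cid, db) == cid   # UUID inputs pass straight through
resolve_config_id(123, db)                 # legacy integer id -> looked up via MemoryConfig.config_id_old
resolve_config_id("123", db)               # short legacy string id -> same lookup
# Ids that cannot be resolved raise ValueError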

View File

@@ -0,0 +1,30 @@
"""202601291352
Revision ID: 5ca246ee7dd4
Revises: 915bed077f8d
Create Date: 2026-01-29 13:52:47.647306
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision: str = '5ca246ee7dd4'
down_revision: Union[str, None] = '915bed077f8d'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('model_bases', sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True, comment='创建时间'))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('model_bases', 'created_at')
# ### end Alembic commands ###

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env python3
"""API Key认证MCP服务器"""
from fastapi import FastAPI, HTTPException, Depends, Header
from typing import Optional
import uvicorn
from mcp_base import MCPRequest, handle_mcp_request, TOOLS
app = FastAPI(title="API Key MCP Server", version="1.0.0")
# API Key配置
API_KEYS = {"test-api-key", "demo-key-123"}
def verify_api_key(x_api_key: Optional[str] = Header(None)):
"""验证API Key"""
if x_api_key and x_api_key in API_KEYS:
return True
raise HTTPException(status_code=401, detail="Invalid API Key")
@app.get("/")
async def root():
return {"name": "API Key MCP Server", "version": "1.0.0", "auth_type": "api_key"}
@app.get("/health")
async def health():
return {"status": "healthy", "tools": len(TOOLS), "auth_type": "api_key"}
@app.post("/mcp")
async def mcp_handler(request: MCPRequest, _: bool = Depends(verify_api_key)):
return await handle_mcp_request(request, "API Key MCP Server")
if __name__ == "__main__":
print("启动API Key认证MCP服务器...")
print("访问 http://localhost:8004 查看服务状态")
print("MCP端点: http://localhost:8004/mcp")
print("认证方式: API Key (Header: X-API-Key)")
print("测试API Keys: test-api-key, demo-key-123")
uvicorn.run(app, host="0.0.0.0", port=8004)

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env python3
"""Basic Auth认证MCP服务器"""
from fastapi import FastAPI, HTTPException, Depends, Header
from typing import Optional
import uvicorn
import base64
from mcp_base import MCPRequest, handle_mcp_request, TOOLS
app = FastAPI(title="Basic Auth MCP Server", version="1.0.0")
# Basic Auth配置
BASIC_AUTH_USERS = {"admin": "password", "user": "secret"}
def verify_basic_auth(authorization: Optional[str] = Header(None)):
"""验证Basic Auth"""
if authorization and authorization.startswith("Basic "):
try:
credentials = base64.b64decode(authorization.split(" ")[1]).decode()
username, password = credentials.split(":", 1)
if username in BASIC_AUTH_USERS and BASIC_AUTH_USERS[username] == password:
return True
except:
pass
raise HTTPException(status_code=401, detail="Invalid Basic Auth")
@app.get("/")
async def root():
return {"name": "Basic Auth MCP Server", "version": "1.0.0", "auth_type": "basic_auth"}
@app.get("/health")
async def health():
return {"status": "healthy", "tools": len(TOOLS), "auth_type": "basic_auth"}
@app.post("/mcp")
async def mcp_handler(request: MCPRequest, _: bool = Depends(verify_basic_auth)):
return await handle_mcp_request(request, "Basic Auth MCP Server")
if __name__ == "__main__":
print("启动Basic Auth认证MCP服务器...")
print("访问 http://localhost:8006 查看服务状态")
print("MCP端点: http://localhost:8006/mcp")
print("认证方式: Basic Auth (Header: Authorization: Basic <base64>)")
print("测试用户: admin:password, user:secret")
uvicorn.run(app, host="0.0.0.0", port=8006)

View File

@@ -1,40 +0,0 @@
#!/usr/bin/env python3
"""Bearer Token认证MCP服务器"""
from fastapi import FastAPI, HTTPException, Depends, Header
from typing import Optional
import uvicorn
from mcp_base import MCPRequest, handle_mcp_request, TOOLS
app = FastAPI(title="Bearer Token MCP Server", version="1.0.0")
# Bearer Token配置
BEARER_TOKENS = {"bearer-token-123", "demo-bearer-token"}
def verify_bearer_token(authorization: Optional[str] = Header(None)):
"""验证Bearer Token"""
if authorization and authorization.startswith("Bearer "):
token = authorization.split(" ")[1]
if token in BEARER_TOKENS:
return True
raise HTTPException(status_code=401, detail="Invalid Bearer Token")
@app.get("/")
async def root():
return {"name": "Bearer Token MCP Server", "version": "1.0.0", "auth_type": "bearer_token"}
@app.get("/health")
async def health():
return {"status": "healthy", "tools": len(TOOLS), "auth_type": "bearer_token"}
@app.post("/mcp")
async def mcp_handler(request: MCPRequest, _: bool = Depends(verify_bearer_token)):
return await handle_mcp_request(request, "Bearer Token MCP Server")
if __name__ == "__main__":
print("启动Bearer Token认证MCP服务器...")
print("访问 http://localhost:8005 查看服务状态")
print("MCP端点: http://localhost:8005/mcp")
print("认证方式: Bearer Token (Header: Authorization: Bearer <token>)")
print("测试Bearer Tokens: bearer-token-123, demo-bearer-token")
uvicorn.run(app, host="0.0.0.0", port=8005)

View File

@@ -1,111 +0,0 @@
#!/usr/bin/env python3
"""MCP服务器基础模块 - 共享的模型和处理逻辑"""
from pydantic import BaseModel
from typing import Dict, Any, Optional
class MCPRequest(BaseModel):
jsonrpc: str = "2.0"
id: str
method: str
params: Dict[str, Any] = {}
class MCPResponse(BaseModel):
jsonrpc: str = "2.0"
id: str
result: Any = None
error: Optional[Dict[str, Any]] = None
# Tool definitions
TOOLS = [
{
"name": "calculator",
"description": "Simple calculator",
"inputSchema": {
"type": "object",
"properties": {
"expression": {"type": "string", "description": "Math expression"}
},
"required": ["expression"]
}
},
{
"name": "echo",
"description": "Echo tool",
"inputSchema": {
"type": "object",
"properties": {
"message": {"type": "string", "description": "Message to echo"}
},
"required": ["message"]
}
}
]
async def handle_mcp_request(request: MCPRequest, server_name: str = "MCP Server"):
"""处理MCP请求"""
try:
if request.method == "initialize":
return MCPResponse(
id=request.id,
result={
"protocolVersion": "2024-11-05",
"capabilities": {"tools": {"listChanged": True}},
"serverInfo": {"name": server_name, "version": "1.0.0"}
}
)
elif request.method == "tools/list":
return MCPResponse(
id=request.id,
result={"tools": TOOLS}
)
elif request.method == "tools/call":
tool_name = request.params.get("name")
arguments = request.params.get("arguments", {})
if tool_name == "calculator":
try:
expression = arguments.get("expression", "")
result = eval(expression)  # NOTE: eval runs arbitrary code; see the AST-based sketch below
return MCPResponse(
id=request.id,
result={"content": [{"type": "text", "text": f"结果: {result}"}]}
)
except Exception as e:
return MCPResponse(
id=request.id,
error={"code": -1, "message": f"计算错误: {str(e)}"}
)
elif tool_name == "echo":
message = arguments.get("message", "")
return MCPResponse(
id=request.id,
result={"content": [{"type": "text", "text": f"Echo: {message}"}]}
)
else:
return MCPResponse(
id=request.id,
error={"code": -1, "message": f"未知工具: {tool_name}"}
)
elif request.method == "ping":
return MCPResponse(
id=request.id,
result={"status": "pong"}
)
else:
return MCPResponse(
id=request.id,
error={"code": -1, "message": f"未知方法: {request.method}"}
)
except Exception as e:
return MCPResponse(
id=request.id,
error={"code": -1, "message": str(e)}
)
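
The calculator branch above evaluates the expression with eval, which will execute arbitrary Python if the input is untrusted. A safer drop-in is an AST-based evaluator restricted to arithmetic; the following is only a sketch (the safe_eval helper is not part of the original module):

import ast
import operator

# Whitelisted arithmetic operators
_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.Pow: operator.pow,
    ast.USub: operator.neg,
}

def safe_eval(expression: str) -> float:
    """Evaluate a basic arithmetic expression without calling eval()."""
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError(f"Unsupported expression element: {ast.dump(node)}")
    return _eval(ast.parse(expression, mode="eval"))

# Example: safe_eval("2 * (3 + 4)") returns 14

Inside handle_mcp_request this could replace the eval(expression) call in the calculator branch.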

View File

@@ -1,130 +0,0 @@
#!/usr/bin/env python3
"""简化的MCP服务器 - 用于测试MCP工具集成"""
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, Any, List, Optional
import uvicorn
app = FastAPI(title="Simple MCP Server", version="1.0.0")
class MCPRequest(BaseModel):
jsonrpc: str = "2.0"
id: str
method: str
params: Dict[str, Any] = {}
class MCPResponse(BaseModel):
jsonrpc: str = "2.0"
id: str
result: Any = None
error: Optional[Dict[str, Any]] = None
# Available tool definitions
TOOLS = [
{
"name": "calculator",
"description": "Simple calculator",
"inputSchema": {
"type": "object",
"properties": {
"expression": {"type": "string", "description": "Math expression"}
},
"required": ["expression"]
}
},
{
"name": "echo",
"description": "Echo tool",
"inputSchema": {
"type": "object",
"properties": {
"message": {"type": "string", "description": "Message to echo"}
},
"required": ["message"]
}
}
]
@app.get("/")
async def root():
return {"name": "Simple MCP Server", "version": "1.0.0"}
@app.get("/health")
async def health():
return {"status": "healthy", "tools": len(TOOLS)}
@app.post("/mcp")
async def mcp_handler(request: MCPRequest):
"""处理MCP请求"""
try:
if request.method == "initialize":
return MCPResponse(
id=request.id,
result={
"protocolVersion": "2024-11-05",
"capabilities": {"tools": {"listChanged": True}},
"serverInfo": {"name": "Simple MCP Server", "version": "1.0.0"}
}
)
elif request.method == "tools/list":
return MCPResponse(
id=request.id,
result={"tools": TOOLS}
)
elif request.method == "tools/call":
tool_name = request.params.get("name")
arguments = request.params.get("arguments", {})
if tool_name == "calculator":
try:
expression = arguments.get("expression", "")
result = eval(expression)  # NOTE: do not use eval in production - it executes arbitrary code
return MCPResponse(
id=request.id,
result={"content": [{"type": "text", "text": f"结果: {result}"}]}
)
except Exception as e:
return MCPResponse(
id=request.id,
error={"code": -1, "message": f"计算错误: {str(e)}"}
)
elif tool_name == "echo":
message = arguments.get("message", "")
return MCPResponse(
id=request.id,
result={"content": [{"type": "text", "text": f"Echo: {message}"}]}
)
else:
return MCPResponse(
id=request.id,
error={"code": -1, "message": f"未知工具: {tool_name}"}
)
elif request.method == "ping":
return MCPResponse(
id=request.id,
result={"status": "pong"}
)
else:
return MCPResponse(
id=request.id,
error={"code": -1, "message": f"未知方法: {request.method}"}
)
except Exception as e:
return MCPResponse(
id=request.id,
error={"code": -1, "message": str(e)}
)
if __name__ == "__main__":
print("启动简化MCP服务器...")
print("访问 http://localhost:8002 查看服务状态")
print("MCP端点: http://localhost:8002/mcp")
uvicorn.run(app, host="0.0.0.0", port=8002)

View File

@@ -546,7 +546,10 @@ export const en = {
tags: 'Tags',
createCustomModel: 'Add Custom Model',
edit: 'Edit',
selectOneTip: 'Model API KEY not configured, please configure in Model Plaza first',
selectOneTip: 'Model API KEY not configured, please configure it in the model list first',
load_balance_strategy: 'Concurrency Strategy',
round_robin: 'Sequential Execution - Call each model in order',
none: 'None',
api_key: 'API KEY',
api_base: 'API Base URL',
@@ -554,6 +557,7 @@ export const en = {
add: 'Add',
item: 'item',
apiKeyNum: ' API Keys',
official: 'Official',
llm: 'LLM',
chat: 'Chat',

View File

@@ -1118,7 +1118,10 @@ export const zh = {
tags: '标签',
createCustomModel: '添加自定义模型',
edit: '编辑',
selectOneTip: '模型未配置API KEY请先在模型广场配置',
selectOneTip: '模型未配置API KEY请先在模型列表配置',
load_balance_strategy: '并发策略',
round_robin: '顺序执行 - 按顺序依次调用每个模型',
none: '无',
api_key: 'API KEY',
api_base: 'API Base URL',
@@ -1126,6 +1129,7 @@ export const zh = {
add: '添加',
item: '个',
apiKeyNum: '个 API Key',
official: '官方',
llm: 'LLM',
chat: 'Chat',

View File

@@ -23,7 +23,6 @@ const ModelList: FC<{ query: any }> = ({ query }) => {
getModelNewList({
...query,
is_composite: false,
is_active: true,
})
.then(res => {
setList((res || []) as ProviderModelItem[])

View File

@@ -56,7 +56,10 @@ const ModelSquare = forwardRef <BaseRef, { query: any; handleEdit: (vo?: ModelPl
<RbCard
key={item.id}
title={item.name}
subTitle={<Tag className="rb:mt-1">{t(`modelNew.${item.type}`)}</Tag>}
subTitle={<Space size={8}>
<Tag className="rb:mt-1">{t(`modelNew.${item.type}`)}</Tag>
{item.is_official && <Tag color="success" className="rb:mt-1">{t(`modelNew.official`)}</Tag>}
</Space>}
avatarUrl={getLogoUrl(item.logo)}
avatar={
<div className="rb:w-12 rb:h-12 rb:rounded-lg rb:mr-3.25 rb:bg-[#155eef] rb:flex rb:items-center rb:justify-center rb:text-[28px] rb:text-[#ffffff]">

View File

@@ -44,7 +44,8 @@ const CustomModelModal = forwardRef<CustomModelModalRef, CustomModelModalProps>(
};
const handleUpdate = (data: CustomModelForm) => {
setLoading(true)
const res = isEdit ? updateCustomModel(model.id, data) : addCustomModel(data)
const { type, provider, ...rest} = data
const res = isEdit ? updateCustomModel(model.id, rest) : addCustomModel(data)
res.then(() => {
refresh && refresh()
@@ -129,6 +130,7 @@ const CustomModelModal = forwardRef<CustomModelModalRef, CustomModelModalProps>(
<CustomSelect
url={modelTypeUrl}
hasAll={false}
disabled={isEdit}
format={(items) => items.map((item) => ({ label: t(`modelNew.${item}`), value: String(item) }))}
/>
</Form.Item>
@@ -141,6 +143,7 @@ const CustomModelModal = forwardRef<CustomModelModalRef, CustomModelModalProps>(
<CustomSelect
url={modelProviderUrl}
hasAll={false}
disabled={isEdit}
format={(items) => items.map((item) => ({ label: t(`modelNew.${item}`), value: String(item) }))}
/>
</Form.Item>

View File

@@ -1,5 +1,5 @@
import { forwardRef, useImperativeHandle, useState } from 'react';
import { Form, Input, App } from 'antd';
import { Form, Input, App, Select } from 'antd';
import { useTranslation } from 'react-i18next';
import type { ModelListItem, CompositeModelForm, GroupModelModalRef, GroupModelModalProps, ModelApiKey } from '../types';
@@ -75,8 +75,9 @@ const GroupModelModal = forwardRef<GroupModelModalRef, GroupModelModalProps>(({
const handleUpdate = (data: CompositeModelForm) => {
setLoading(true)
const { type, ...rest } = data
const res = isEdit
? updateCompositeModel(model.id, data)
? updateCompositeModel(model.id, { ...rest })
: addCompositeModel(data)
res.then(() => {
@@ -106,6 +107,7 @@ const GroupModelModal = forwardRef<GroupModelModalRef, GroupModelModalProps>(({
<Form
form={form}
layout="vertical"
initialValues={{ balance_strategy: 'none' }}
>
<Form.Item
name="logo"
@@ -147,6 +149,19 @@ const GroupModelModal = forwardRef<GroupModelModalRef, GroupModelModalProps>(({
<Input.TextArea placeholder={t('common.pleaseEnter')} />
</Form.Item>
<Form.Item
name="load_balance_strategy"
label={t('modelNew.load_balance_strategy')}
>
<Select
options={['round_robin', 'none'].map(key => ({
label: t(`modelNew.${key}`),
value: key
}))}
placeholder={t('common.pleaseSelect')}
/>
</Form.Item>
<Form.Item name="api_key_ids">
<ModelImplement type={type} />
</Form.Item>
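
The new load_balance_strategy field lets a group model pick between 'none' and 'round_robin' ('Sequential Execution - Call each model in order'). How the backend applies that choice is not shown in this diff; purely as an illustration, sequential rotation over a group's configured entries could look like the sketch below (names and structure are assumptions, not the project's implementation):

from itertools import cycle

class RoundRobinSelector:
    """Illustrative only: rotate through a group's configured entries in order."""

    def __init__(self, api_key_ids: list):
        if not api_key_ids:
            raise ValueError("a group model needs at least one model/API-key entry")
        self._iterator = cycle(api_key_ids)

    def next_key(self):
        # Each call returns the next entry, wrapping around at the end
        return next(self._iterator)

# Example: a group with three entries is called in the order a, b, c, a, b, ...
selector = RoundRobinSelector(["key-a", "key-b", "key-c"])
print([selector.next_key() for _ in range(5)])  # ['key-a', 'key-b', 'key-c', 'key-a', 'key-b']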

View File

@@ -35,12 +35,12 @@ const KeyConfigModal = forwardRef<KeyConfigModalRef, KeyConfigModalProps>(({
updateProviderApiKeys({
...values,
provider: model.provider
}).then(() => {
}).then((res) => {
if (refresh) {
refresh();
}
handleClose()
message.success(t('common.updateSuccess'))
message.success(res as string)
})
.catch(() => {
setLoading(false)

View File

@@ -1,8 +1,8 @@
import { forwardRef, useImperativeHandle, useState } from 'react';
import { Form, Cascader, App } from 'antd';
import { forwardRef, useImperativeHandle, useState, useEffect } from 'react';
import { Form, Cascader, App, type CascaderProps } from 'antd';
import { useTranslation } from 'react-i18next';
import type { SubModelModalForm, SubModelModalRef, SubModelModalProps, ModelList } from './types';
import type { SubModelModalForm, SubModelModalRef, SubModelModalProps } from './types';
import RbModal from '@/components/RbModal'
import CustomSelect from '@/components/CustomSelect'
import { modelProviderUrl, getModelNewList } from '@/api/models'
@@ -18,7 +18,8 @@ interface Option {
}
const SubModelModal = forwardRef<SubModelModalRef, SubModelModalProps>(({
refresh,
type
type,
groupedByProvider
}, ref) => {
const { t } = useTranslation();
const { message } = App.useApp()
@@ -26,28 +27,27 @@ const SubModelModal = forwardRef<SubModelModalRef, SubModelModalProps>(({
const [form] = Form.useForm<SubModelModalForm>();
const [selecteds, setSelecteds] = useState<any[]>([])
const [modelList, setModelList] = useState<Option[]>([])
const provider = Form.useWatch(['provider'], form)
useEffect(() => {
if (provider && groupedByProvider) {
const lastModels = groupedByProvider[provider] || []
const list = lastModels.map(vo => [{ name: vo.model_name, id: vo.model_config_ids[0], value: vo.model_config_ids[0], provider }, { value: vo.id }])
setSelecteds(list)
form.setFieldValue('api_key_ids', lastModels.map(vo => [vo.model_config_ids[0], vo.id]))
}
}, [groupedByProvider, provider])
// Cancel handler: reset the form state and close the modal
const handleClose = () => {
form.resetFields();
setVisible(false);
setSelecteds([])
setModelList([])
};
const handleOpen = (list?: ModelList[], provider?: string) => {
if (list?.length && provider) {
const initialValue: SubModelModalForm = {
provider,
api_key_ids: list.map(vo => {
return [vo.model_config_ids[0], vo.id]
})
}
form.setFieldsValue(initialValue);
handleChangeProvider(provider, initialValue.api_key_ids)
} else {
form.resetFields()
}
const handleOpen = () => {
form.resetFields()
setVisible(true);
};
// Save handler: submit the selected models
@@ -59,7 +59,8 @@ const SubModelModal = forwardRef<SubModelModalRef, SubModelModalProps>(({
...vo[0],
model_name: vo[0].name,
model_config_ids: [vo[0].id],
id: vo[1].value
id: vo[1].value,
api_key: vo[1].label
})))
handleClose()
})
@@ -67,7 +68,6 @@ const SubModelModal = forwardRef<SubModelModalRef, SubModelModalProps>(({
const handleChange = (value: (string | number)[][], selectedOptions: Option[][]) => {
const filterList = selectedOptions.filter(vo => vo.length === 1).map(item => item[0])
const lastFilterLit = value.filter(vo => vo.length !== 1)
console.log('onchange', value, lastFilterLit, selectedOptions, filterList)
if (filterList.length) {
message.warning(`${filterList.map(vo => vo.label)}${t('modelNew.selectOneTip')}`)
form.setFieldValue('api_key_ids', lastFilterLit)
@@ -77,35 +77,51 @@ const SubModelModal = forwardRef<SubModelModalRef, SubModelModalProps>(({
const handleChangeProvider = (provider: string, api_key_ids?: any[]) => {
form.setFieldValue('api_key_ids', undefined)
getModelNewList({
provider: provider,
is_composite: false,
is_active: true,
type
})
.then(res => {
const response = res as ProviderModelItem[]
const list = response[0]?.models || []
setModelList(list.map(vo => {
const children = vo.api_keys.map(item => ({
label: item.api_key,
value: item.id,
}))
return {
...vo,
label: vo.name,
value: vo.id,
children: children
}
}))
if (api_key_ids?.length) {
form.setFieldsValue({
api_key_ids: api_key_ids
})
}
if (provider) {
getModelNewList({
provider: provider,
is_composite: false,
is_active: true,
type
})
.then(res => {
const response = res as ProviderModelItem[]
const list = response[0]?.models || []
setModelList(list.map(vo => {
const children = vo.api_keys.map(item => ({
label: item.api_key,
value: item.id,
}))
return {
...vo,
label: vo.name,
value: vo.id,
children: children
}
}))
if (api_key_ids?.length) {
form.setFieldsValue({
api_key_ids: api_key_ids
})
}
})
} else {
setModelList([])
}
}
const displayRender: CascaderProps<Option>['displayRender'] = (labels, selectedOptions = []) =>
labels.map((label, i) => {
const option = selectedOptions[i];
if (i === labels.length - 1) {
return (
<span key={option?.value || i}>
{label}
</span>
);
}
return <span key={option?.value || i}>{label} / </span>;
});
// Methods exposed to the parent component
useImperativeHandle(ref, () => ({
@@ -154,6 +170,7 @@ const SubModelModal = forwardRef<SubModelModalRef, SubModelModalProps>(({
className="rb:w-full!"
showCheckedStrategy={SHOW_CHILD}
changeOnSelect
displayRender={displayRender}
/>
</Form.Item>
</Form>

View File

@@ -24,18 +24,15 @@ const ModelImplement: FC<ModelImplementProps> = ({ type, value, onChange }) => {
}
subModelModalRef.current?.handleOpen()
}
const handleEdit = (list: ModelList[], provider: string ) => {
subModelModalRef.current?.handleOpen(list, provider)
}
const handleDelete = (provider: string) => {
const handleDelete = (vo: any) => {
modal.confirm({
title: t('common.confirmDeleteDesc', { name: provider }),
title: t('common.confirmDeleteDesc', { name: [vo.model_name, vo.api_key].join(' / ') }),
content: t('application.apiKeyDeleteContent'),
okText: t('common.delete'),
cancelText: t('common.cancel'),
okType: 'danger',
onOk: () => {
onChange?.(value?.filter((item: any) => item.provider !== provider))
onChange?.(value?.filter((item: any) => item.id !== vo.id))
}
})
}
@@ -73,23 +70,18 @@ const ModelImplement: FC<ModelImplementProps> = ({ type, value, onChange }) => {
<div className="rb:bg-[#F5F6F7] rb:rounded-lg rb:p-3 rb:mt-2">
{!value || value.length === 0
? <Empty size={88} />
: Object.entries(groupedByProvider).map(([provider, items]: [string, ModelList[]]) => {
: value.map((item: any) => {
return (
<div key={provider} className="rb:mb-4 last:rb:mb-0">
<Flex justify="space-between" align="center" className="rb:mb-2 last:rb:mb-0">
<div className="rb:font-medium">{[...new Set(items?.map((vo) => vo.model_name))].join(', ')}</div>
<Space>
<div
className="rb:w-6 rb:h-6 rb:cursor-pointer rb:bg-[url('@/assets/images/editBorder.svg')] rb:hover:bg-[url('@/assets/images/editBg.svg')]"
onClick={() => handleEdit(items, provider)}
></div>
<div
className="rb:w-6 rb:h-6 rb:cursor-pointer rb:bg-[url('@/assets/images/deleteBorder.svg')] rb:hover:bg-[url('@/assets/images/deleteBg.svg')]"
onClick={() => handleDelete(provider)}
></div>
</Space>
<div key={item.id} className="rb:mb-4 rb:last:rb:mb-0 rb:bg-[#FBFDFF] rb:rounded-lg rb:p-3">
<Flex gap={8} justify="space-between" align="center" className="rb:mb-2 rb:last:rb:mb-0">
<div className="rb:font-medium">{[item.model_name, item.api_key].join(' / ')}</div>
<div
className="rb:w-6 rb:h-6 rb:cursor-pointer rb:bg-[url('@/assets/images/deleteBorder.svg')] rb:hover:bg-[url('@/assets/images/deleteBg.svg')]"
onClick={() => handleDelete(item)}
></div>
</Flex>
<Tag className="rb:mb-2">{t(`modelNew.${provider}`)}</Tag>
<Tag className="rb:mb-2">{t(`modelNew.${item.provider}`)}</Tag>
</div>
)
})}
@@ -98,6 +90,7 @@ const ModelImplement: FC<ModelImplementProps> = ({ type, value, onChange }) => {
ref={subModelModalRef}
refresh={handleRefresh}
type={type}
groupedByProvider={groupedByProvider}
/>
</div>
)

View File

@@ -8,9 +8,10 @@ export interface SubModelModalForm {
api_key_ids: string[][];
}
export interface SubModelModalRef {
handleOpen: (list?: ModelList[], provider?: string) => void;
handleOpen: () => void;
}
export interface SubModelModalProps {
type?: string;
refresh?: (vo: ModelList[]) => void;
groupedByProvider?: Record<string, ModelList[]>
}

View File

@@ -1,4 +1,4 @@
import { useState, useImperativeHandle, forwardRef, useRef } from 'react';
import { useState, useImperativeHandle, forwardRef, useRef, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { Button, Switch, Row, Col, Space, Tooltip } from 'antd'
@@ -8,8 +8,9 @@ import RbCard from '@/components/RbCard/Card'
import Tag from '@/components/Tag';
import PageEmpty from '@/components/Empty/PageEmpty';
import MultiKeyConfigModal from './MultiKeyConfigModal'
import { getModelNewList, updateModelStatus } from '@/api/models'
import { getModelNewList, updateModelStatus, modelTypeUrl } from '@/api/models'
import { getLogoUrl } from '../utils'
import CustomSelect from '@/components/CustomSelect'
interface ModelListDetailProps {
refresh?: () => void;
@@ -22,8 +23,10 @@ const ModelListDetail = forwardRef<ModelListDetailRef, ModelListDetailProps>(({
const [list, setList] = useState<ModelListItem[]>([])
const multiKeyConfigModalRef = useRef<MultiKeyConfigModalRef>(null)
const [loading, setLoading] = useState(false)
const [type, setType] = useState<string | undefined | null>(null)
const handleOpen = (vo: ProviderModelItem) => {
setType(null)
setOpen(true)
getData(vo)
}
@@ -53,27 +56,50 @@ const ModelListDetail = forwardRef<ModelListDetailRef, ModelListDetailProps>(({
}
const handleClose = () => {
setType(null)
setOpen(false)
refresh?.()
}
const handleRefresh = () => {
getData(data)
}
const handleTypeChange = (value: string) => {
setType(value)
}
useImperativeHandle(ref, () => ({
handleOpen,
}));
const filterList = useMemo(() => {
if (!type) return list
return list.filter(vo => vo.type === type)
}, [type, list])
return (
<RbDrawer
title={<>{t(`modelNew.${data.provider}`)} {t('modelNew.modelList')} ({list.length}{t('modelNew.item')})</>}
open={open}
onClose={handleClose}
>
{list.length === 0
<Row gutter={16}>
<Col span={12}>
<CustomSelect
value={type}
url={modelTypeUrl}
hasAll={false}
format={(items) => items.map((item) => ({ label: t(`modelNew.${item}`), value: String(item) }))}
onChange={handleTypeChange}
className="rb:w-full"
allowClear={true}
placeholder={t('modelNew.type')}
/>
</Col>
</Row>
{filterList.length === 0
? <PageEmpty />
: <div className="rb:grid rb:grid-cols-2 rb:gap-4">
{list.map(item => (
: <div className="rb:grid rb:grid-cols-2 rb:gap-4 rb:mt-3">
{filterList.map(item => (
<RbCard
key={item.id}
title={item.name}

View File

@@ -65,7 +65,10 @@ const ModelSquareDetail = forwardRef<ModelSquareDetailRef, ModelSquareDetailProp
<RbCard
key={item.id}
title={item.name}
subTitle={<Tag className="rb:mt-1">{t(`modelNew.${item.type}`)}</Tag>}
subTitle={<Space size={8}>
<Tag className="rb:mt-1">{t(`modelNew.${item.type}`)}</Tag>
{item.is_official && <Tag color="success" className="rb:mt-1">{t(`modelNew.official`)}</Tag>}
</Space>}
avatarUrl={getLogoUrl(item.logo)}
avatar={
<div className="rb:w-12 rb:h-12 rb:rounded-lg rb:mr-3.25 rb:bg-[#155eef] rb:flex rb:items-center rb:justify-center rb:text-[28px] rb:text-[#ffffff]">

View File

@@ -1,9 +1,9 @@
import { useState, useRef, type FC } from 'react';
import { Button, Flex, Space, type SegmentedProps } from 'antd'
import { Button, Flex, Space, type SegmentedProps, Form } from 'antd'
import { useTranslation } from 'react-i18next';
import GroupModelModal from './components/GroupModelModal'
import type { ModelListItem, GroupModelModalRef, CustomModelModalRef, ModelPlazaItem, BaseRef } from './types'
import type { ModelListItem, GroupModelModalRef, CustomModelModalRef, ModelPlazaItem, BaseRef, Query } from './types'
import SearchInput from '@/components/SearchInput'
import PageTabs from '@/components/PageTabs'
import GroupModel from './Group'
@@ -17,11 +17,12 @@ const tabKeys = ['group', 'list', 'square']
const ModelManagement: FC = () => {
const { t } = useTranslation();
const [activeTab, setActiveTab] = useState('group');
const [query, setQuery] = useState({})
const configModalRef = useRef<GroupModelModalRef>(null)
const customModelModalRef = useRef<CustomModelModalRef>(null)
const groupRef = useRef<BaseRef>(null)
const squareRef = useRef<BaseRef>(null)
const [form] = Form.useForm<Query>()
const query = Form.useWatch([], form)
const formatTabItems = () => {
return tabKeys.map(value => ({
@@ -31,7 +32,7 @@ const ModelManagement: FC = () => {
}
const handleChangeTab = (value: SegmentedProps['value']) => {
setActiveTab(value as string);
setQuery({})
form.resetFields()
}
const handleEdit = (vo?: ModelListItem | ModelPlazaItem) => {
@@ -54,15 +55,6 @@ const ModelManagement: FC = () => {
break
}
}
const handleSearch = (value?: string) => {
setQuery({ search: value })
}
const handleTypeChange = (value: string) => {
setQuery(pre => ({ ...pre, type: value }))
}
const handleProviderChange = (value: string) => {
setQuery(pre => ({ ...pre, provider: value }))
}
return (
<>
@@ -73,35 +65,44 @@ const ModelManagement: FC = () => {
onChange={handleChangeTab}
/>
<Space size={12}>
{activeTab === 'list' ? <>
<CustomSelect
url={modelTypeUrl}
hasAll={false}
format={(items) => items.map((item) => ({ label: t(`modelNew.${item}`), value: String(item) }))}
onChange={handleTypeChange}
className="rb:w-30"
allowClear={true}
placeholder={t('modelNew.type')}
/>
<CustomSelect
url={modelProviderUrl}
hasAll={false}
format={(items) => items.map((item) => ({ label: t(`modelNew.${item}`), value: String(item) }))}
onChange={handleProviderChange}
className="rb:w-30"
allowClear={true}
placeholder={t('modelNew.provider')}
/>
</>
: <SearchInput
placeholder={t(`modelNew.${activeTab}SearchPlaceholder`)}
onSearch={handleSearch}
className="rb:w-70!"
/>}
{activeTab === 'group' && <Button type="primary" onClick={() => handleEdit()}>+ {t('modelNew.createGroupModel')}</Button>}
{activeTab === 'square' && <Button type="primary" onClick={() => handleEdit()}>+ {t('modelNew.createCustomModel')}</Button>}
</Space>
<Form form={form}>
<Space size={12}>
{activeTab === 'list' &&
<Form.Item name="type" noStyle>
<CustomSelect
url={modelTypeUrl}
hasAll={false}
format={(items) => items.map((item) => ({ label: t(`modelNew.${item}`), value: String(item) }))}
className="rb:w-30"
allowClear={true}
placeholder={t('modelNew.type')}
/>
</Form.Item>
}
{(activeTab === 'list' || activeTab === 'square') &&
<Form.Item name="provider" noStyle>
<CustomSelect
url={modelProviderUrl}
hasAll={false}
format={(items) => items.map((item) => ({ label: t(`modelNew.${item}`), value: String(item) }))}
className="rb:w-30"
allowClear={true}
placeholder={t('modelNew.provider')}
/>
</Form.Item>
}
{activeTab !== 'list' &&
<Form.Item name="search" noStyle>
<SearchInput
placeholder={t(`modelNew.${activeTab}SearchPlaceholder`)}
className="rb:w-70!"
/>
</Form.Item>
}
{activeTab === 'group' && <Button type="primary" onClick={() => handleEdit()}>+ {t('modelNew.createGroupModel')}</Button>}
{activeTab === 'square' && <Button type="primary" onClick={() => handleEdit()}>+ {t('modelNew.createCustomModel')}</Button>}
</Space>
</Form>
</Flex>
<div className="rb:w-full rb:h-[calc(100%-48px)] rb:my-4">

View File

@@ -17,7 +17,7 @@ export interface DescriptionItem {
export interface CompositeModelForm {
logo?: any;
name: string;
type: string;
type?: string;
description: string;
api_key_ids: ModelApiKey[] | string[];
}
@@ -119,8 +119,8 @@ export interface ModelSquareDetailRef {
}
export interface CustomModelForm {
name: string;
type: string;
provider: string;
type?: string;
provider?: string;
logo?: any;
description: string;
is_official: boolean;

View File

@@ -105,7 +105,7 @@ export const nodeLibrary: NodeLibrary[] = [
model_id: {
type: 'customSelect',
url: getModelListUrl,
params: { type: 'llm,chat', is_active: true }, // llm/chat
params: { type: 'llm,chat', pagesize: 100, is_active: true }, // llm/chat
valueKey: 'id',
labelKey: 'name',
},
@@ -166,7 +166,7 @@ export const nodeLibrary: NodeLibrary[] = [
model_id: {
type: 'customSelect',
url: getModelListUrl,
params: { type: 'llm,chat', is_active: true }, // llm/chat
params: { type: 'llm,chat', pagesize: 100, is_active: true }, // llm/chat
valueKey: 'id',
labelKey: 'name',
},
@@ -259,7 +259,7 @@ export const nodeLibrary: NodeLibrary[] = [
model_id: {
type: 'customSelect',
url: getModelListUrl,
params: { type: 'llm,chat', is_active: true }, // llm/chat
params: { type: 'llm,chat', pagesize: 100, is_active: true }, // llm/chat
valueKey: 'id',
labelKey: 'name',
},