Compare commits

...

179 Commits

Author SHA1 Message Date
yingzhao
9edf5c9bd6 Merge pull request #915 from SuanmoSuanyangTechnology/fix/userinfo_zy
fix(web): userinfo
2026-04-16 11:11:54 +08:00
zhaoying
2f0c4300df fix(web): userinfo 2026-04-16 11:10:38 +08:00
yingzhao
e0d7a5a91f Merge pull request #906 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
Revert "fix(web): prompt editor"
2026-04-15 14:51:28 +08:00
zhaoying
5ac2d5602e Revert "fix(web): prompt editor"
This reverts commit 71e5b6586a.
2026-04-15 14:50:19 +08:00
yingzhao
f4c3974956 Merge pull request #905 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
fix(web): prompt editor
2026-04-15 14:41:16 +08:00
zhaoying
71e5b6586a fix(web): prompt editor 2026-04-15 14:38:40 +08:00
Ke Sun
bfb723a468 Merge pull request #903 from SuanmoSuanyangTechnology/pref/prompt_optim
perf(prompt-optimizer): handle escaped quotes in JSON parsing
2026-04-15 14:05:18 +08:00
山程漫悟
61f2e44bd5 Merge pull request #904 from SuanmoSuanyangTechnology/fix/Timebomb_030
fix(custom-tools)
2026-04-15 14:01:32 +08:00
Eternity
ed765b7c26 fix(prompt-optimizer): handle escaped quotes in JSON parsing 2026-04-15 13:59:55 +08:00
Timebomb2018
3018d186f7 fix(custom-tools): remove parameter coercion in custom tool base class 2026-04-15 13:56:08 +08:00
山程漫悟
2e1470cb52 Merge pull request #902 from SuanmoSuanyangTechnology/fix/Timebomb_030
fix(model)
2026-04-15 12:27:46 +08:00
Timebomb2018
737858731b fix(core): conditionally apply thinking parameters based on model support 2026-04-15 12:24:11 +08:00
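
A minimal sketch of the capability gating this commit describes (the model list and the `enable_thinking` flag are assumptions for illustration, not this repo's API):

# Hypothetical sketch: attach thinking parameters only for models that support them.
THINKING_CAPABLE_MODELS = {"qwen-plus", "qwen-max"}  # assumed capability list

def build_request_params(model_name: str, base_params: dict, thinking: bool) -> dict:
    params = dict(base_params)
    if thinking and model_name in THINKING_CAPABLE_MODELS:
        params["enable_thinking"] = True  # hypothetical provider flag
    return params
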
山程漫悟
d072eb1af7 Merge pull request #901 from SuanmoSuanyangTechnology/fix/Timebomb_030
fix(custom-tools)
2026-04-15 12:19:19 +08:00
Timebomb2018
daaee63bd5 refactor(custom-tools): coerce query and request body parameters to schema types 2026-04-15 12:15:16 +08:00
山程漫悟
e3c643b659 Merge pull request #900 from SuanmoSuanyangTechnology/fix/prompt_optim
fix(prompt-optimizer): support list content type in prompt optimizer
2026-04-15 11:39:01 +08:00
Eternity
017efdc320 fix(prompt-optimizer): support list content type in prompt optimizer 2026-04-15 11:03:44 +08:00
Ke Sun
29aef4527c Merge pull request #896 from SuanmoSuanyangTechnology/fix/extract-aliases
fix(memory): make PgSQL the single source of truth for user entity al…
2026-04-14 18:40:40 +08:00
Ke Sun
d9cb2b511b Merge pull request #892 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): add Sourcery summary extraction with Qwen fallback
2026-04-14 18:36:47 +08:00
lanceyq
49e0801d15 refactor(memory): unify user placeholder names and harden alias sync logic
- Replace hardcoded user placeholder name lists in write_tools and
user_memory_service with shared _USER_PLACEHOLDER_NAMES constant
- Filter user placeholder names during alias merging in _merge_attribute
  to prevent cross-role alias contamination on non-user entities
- Use toLower() in Cypher query for case-insensitive name matching
- Change PgSQL->Neo4j alias sync condition from 'if pg_aliases' to
  'if info is not None' so empty aliases correctly clear stale data
2026-04-14 18:06:56 +08:00
山程漫悟
dde7ea9039 Merge pull request #897 from SuanmoSuanyangTechnology/fix/Timebomb_030
fix(rag)
2026-04-14 18:04:11 +08:00
Timebomb2018
5262aedab9 Merge branch 'refs/heads/release/v0.3.0' into fix/Timebomb_030 2026-04-14 17:56:48 +08:00
Timebomb2018
441b21774d fix(rag): replace semicolon separators with newlines in Excel parser output 2026-04-14 17:56:30 +08:00
yingzhao
d6dd038167 Merge pull request #894 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
fix(web): Hide error message when workflow node error message equals …
2026-04-14 17:44:55 +08:00
zhaoying
47c242e513 fix(web): Compatible with Windows whitespace 2026-04-14 17:43:58 +08:00
lanceyq
811193dd75 fix(memory): make PgSQL the single source of truth for user entity aliases
- Skip alias merging for user entities during dedup (_merge_attribute and
  _merge_entities_with_aliases) to prevent dirty data from overwriting
  PgSQL authoritative aliases
- Add PgSQL→Neo4j alias sync after Neo4j write in write_tools to
  ensure Neo4j user entities always reflect the PgSQL source
- Remove deduped_aliases (Neo4j history) from alias sync in
  extraction_orchestrator, only append newly extracted aliases to PgSQL
- Guard Neo4j MERGE cypher to preserve existing aliases for user
  entities (name IN ['用户','我','User','I'])
- Fix emotion_analytics_service query to use ExtractedEntity label
  and entity_type property
2026-04-14 17:28:24 +08:00
山程漫悟
797780824c Merge pull request #895 from SuanmoSuanyangTechnology/fix/Timebomb_030
refactor(rag)
2026-04-14 17:18:13 +08:00
Timebomb2018
75e95bab01 refactor(rag): simplify Excel parsing logic and remove redundant chunk_token_num assignment 2026-04-14 17:10:52 +08:00
zhaoying
3c2a78a449 fix(web): Hide error message when workflow node error message equals empty string 2026-04-14 16:35:19 +08:00
Ke Sun
4f0e5d0866 ci(wechat-notify): add Sourcery summary extraction with Qwen fallback
- Extract Sourcery AI summary from PR body as primary source
- Add fallback to Qwen AI summarization when Sourcery summary unavailable
- Refactor notification payload to conditionally use Sourcery or Qwen summary
- Update step conditions to skip Qwen processing when Sourcery summary found
- Improve code formatting and indentation consistency in Python scripts
- Reduce redundant file I/O by writing directly to GITHUB_OUTPUT
2026-04-14 16:24:20 +08:00
山程漫悟
7a84ee33c6 Merge pull request #891 from SuanmoSuanyangTechnology/fix/Timebomb_030
fix(http-request,embedding,naive)
2026-04-14 16:22:56 +08:00
Timebomb2018
e3265e4ba3 fix(http-request,embedding,naive): tighten form-data validation, reduce truncation length to 8000, and disable chunking for Excel
The form-data validation now ensures all items in the list are of type HttpFormData. Truncation length for embedding inputs is reduced from 8191 to 8000 to accommodate tokenizer differences and avoid overflow. Excel parsing now disables chunking by setting chunk_token_num to 0, aligning with intended behavior for structured file ingestion.
2026-04-14 16:14:01 +08:00
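
A minimal sketch of the three guards this commit describes (the `HttpFormData` stub and function names are assumptions; the 8000/8191 values and chunk_token_num=0 come from the message above):

class HttpFormData:  # stand-in for the real form-data config model
    pass

EMBED_MAX_TOKENS = 8000  # down from 8191, leaving headroom for tokenizer differences

def validate_form_data(items: list) -> None:
    # Every item in the form-data list must be an HttpFormData instance.
    if not isinstance(items, list) or not all(isinstance(i, HttpFormData) for i in items):
        raise ValueError("form-data must be a list of HttpFormData items")

def truncate_embedding_input(tokens: list) -> list:
    return tokens[:EMBED_MAX_TOKENS]

excel_parser_config = {"chunk_token_num": 0}  # 0 disables chunking for Excel ingestion
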
yingzhao
3e7a004599 Merge pull request #890 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
fix(web): adjust the value of End User Name
2026-04-14 16:07:55 +08:00
zhaoying
fa1e5ee43c fix(web): adjust the value of End User Name 2026-04-14 16:06:03 +08:00
山程漫悟
c72a6fd724 Merge pull request #889 from SuanmoSuanyangTechnology/fix/Timebomb_030
fix(workflow http-request)
2026-04-14 15:58:28 +08:00
Timebomb2018
0965008210 fix(http-request): support array and file variables in form-data files upload
- Updated form-data handling to accept both single FileVariable and ArrayVariable containing FileVariable for file uploads
- Fixed HTTP client redirect handling by enabling follow_redirects=True when downloading remote files
- Adjusted config validation to correctly require list type for form-data fields instead of HttpFormData class
2026-04-14 15:53:16 +08:00
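
A sketch of the upload-handling shape (the variable classes here are stand-ins for the workflow's types; only `follow_redirects=True` is taken directly from the commit):

from dataclasses import dataclass, field

import httpx

@dataclass
class FileVariable:  # stand-in for the workflow file variable type
    url: str

@dataclass
class ArrayVariable:  # stand-in for the workflow array variable type
    items: list = field(default_factory=list)

def collect_upload_files(value) -> list:
    # Accept a single FileVariable or an ArrayVariable containing FileVariable.
    if isinstance(value, FileVariable):
        return [value]
    if isinstance(value, ArrayVariable):
        return [v for v in value.items if isinstance(v, FileVariable)]
    return []

def download_remote_file(url: str) -> bytes:
    # follow_redirects=True so files served behind 30x redirects still resolve
    return httpx.get(url, follow_redirects=True).content
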
yingzhao
bcadd2a6f1 Merge pull request #888 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
fix(web): change http body key name
2026-04-14 15:10:21 +08:00
zhaoying
b5ec5c2cea fix(web): change http body key name 2026-04-14 15:08:07 +08:00
yingzhao
aa683efaa0 Merge pull request #884 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
fix(web): calculate using the filtered breadcrumbs length
2026-04-14 10:05:05 +08:00
zhaoying
2d9986f902 fix(web): header user name 2026-04-14 10:03:46 +08:00
zhaoying
06075ffef5 fix(web): calculate using the filtered breadcrumbs length 2026-04-14 09:57:36 +08:00
Ke Sun
a7336b0829 Merge pull request #883 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): refactor AI summary generation to Python
2026-04-13 19:37:11 +08:00
Ke Sun
0d16e168e7 ci(wechat-notify): refactor AI summary generation to Python
- Replace curl with urllib.request for API calls to improve portability
- Move API key to environment variable for better security practices
- Inline Python script using heredoc for cleaner workflow definition
- Add intermediate file (ai_summary.txt) to separate concerns between API call and output handling
- Simplify JSON payload construction using Python's json module
- Improve error handling with fallback message for failed AI generation
2026-04-13 19:36:27 +08:00
Ke Sun
a882e5e5c4 Merge pull request #882 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): refine AI prompt for commit summarization
2026-04-13 19:34:09 +08:00
Ke Sun
c614bb5be7 ci(wechat-notify): refine AI prompt for commit summarization
- Update prompt instruction to request numbered list format
- Remove title and preamble from AI output for cleaner formatting
- Improve clarity by specifying "要点" (key points) in prompt
- Enhance consistency of release notification messages
2026-04-13 19:33:30 +08:00
Ke Sun
1ff0f3ebfd Merge pull request #881 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): replace curl with urllib for webhook request
2026-04-13 19:30:50 +08:00
Ke Sun
bafcb5c545 ci(wechat-notify): replace curl with urllib for webhook request
- Replace curl command with Python urllib.request for direct HTTP POST
- Remove intermediate wechat.json file write, send payload directly
- Add urllib.request import to Python script
- Simplify workflow by eliminating file I/O and shell command dependency
- Improves reliability by keeping notification logic entirely within Python
2026-04-13 19:30:15 +08:00
Ke Sun
f8d27fada6 Merge pull request #880 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): refactor payload building to Python script
2026-04-13 19:28:53 +08:00
Ke Sun
90365cd026 ci(wechat-notify): refactor payload building to Python script
- Extract WeChat notification payload construction from inline curl command
- Move environment variables to explicit env section for clarity
- Build JSON payload using Python for better string handling and readability
- Write payload to temporary file and pass to curl via -d @wechat.json
- Improves maintainability and reduces shell string escaping complexity
2026-04-13 19:28:10 +08:00
Ke Sun
d96c7b88f0 Merge pull request #879 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): inline payload building logic into workflow
2026-04-13 19:25:30 +08:00
Ke Sun
99559621c5 ci(wechat-notify): inline payload building logic into workflow
- Remove build_wechat_payload.py script and consolidate payload construction directly in workflow
- Eliminate intermediate environment variables and file I/O operations for cleaner execution
- Inline AI summary payload generation into curl request
- Inline WeChat notification payload generation into curl request
- Remove unnecessary checkout step since script is no longer needed
- Simplify workflow by reducing file dependencies and improving readability
2026-04-13 19:24:50 +08:00
Ke Sun
926f65a1ff Merge pull request #878 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): extract payload building logic to Python script
2026-04-13 19:21:33 +08:00
Ke Sun
b20971dc95 ci(wechat-notify): extract payload building logic to Python script
- Create new `.github/scripts/build_wechat_payload.py` to handle WeChat payload generation
- Replace inline Python string concatenation with dedicated script for better maintainability
- Add checkout step to access the script during workflow execution
- Simplify workflow by delegating payload construction to external script
- Improve code readability and reusability for future notification enhancements
2026-04-13 19:20:53 +08:00
Ke Sun
1ff0274027 Merge pull request #877 from SuanmoSuanyangTechnology/fix/simple-fix
ci(wechat-notify): replace shell string formatting with Python
2026-04-13 19:18:51 +08:00
Ke Sun
8495aa5dde ci(wechat-notify): replace shell string formatting with Python
- Replace printf and jq command chain with Python script for payload generation
- Improve readability by using Python string concatenation instead of nested printf format specifiers
- Ensure proper JSON encoding with ensure_ascii=False to preserve Chinese characters
- Simplify environment variable interpolation using os.environ dictionary access
2026-04-13 19:18:11 +08:00
Ke Sun
d8ef7a8e02 Merge pull request #876 from SuanmoSuanyangTechnology/fix/simple-fix
ci: add WeChat release notification workflow
2026-04-13 19:16:30 +08:00
Ke Sun
7a4a02b2bb ci: add WeChat release notification workflow
- Add GitHub Actions workflow to notify WeChat on release branch merges
- Implement multi-step pipeline: sync ref, verify latest PR, fetch commits
- Integrate Aliyun Qwen AI for automated Chinese commit message summarization
- Send formatted Markdown notifications to WeChat webhook with release details
- Include branch, author, PR title, AI summary, and PR link in notifications
2026-04-13 19:15:54 +08:00
Ke Sun
8f623a66c8 Merge pull request #875 from SuanmoSuanyangTechnology/fix/simple-fix
chore(.gitignore): add redbear-mem-benchmark to ignored paths
2026-04-13 19:14:09 +08:00
Ke Sun
77ed9faea1 chore(.gitignore): add redbear-mem-benchmark to ignored paths
- Add redbear-mem-benchmark directory to .gitignore
- Prevents benchmark artifacts from being tracked in version control
- Aligns with existing pattern of ignoring redbear-mem-metrics directory
2026-04-13 19:13:23 +08:00
Ke Sun
1ff3748935 ci: remove release notification workflow
- Delete release-notify.yml GitHub Actions workflow
- Remove AI-powered release summary generation via Qwen API
- Remove WeChat enterprise notification integration
- Simplify CI/CD pipeline by consolidating notification logic
2026-04-13 19:11:15 +08:00
Ke Sun
f023c43f80 Merge pull request #874 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
fix(web): breadcrumb ui
2026-04-13 19:08:22 +08:00
zhaoying
70d4e79de1 fix(web): breadcrumb ui 2026-04-13 19:05:32 +08:00
Ke Sun
62f345b3de Merge pull request #872 from SuanmoSuanyangTechnology/fix/implicit-num
refactor(memory): use MemorySummary node count for implicit memory me…
2026-04-13 19:03:35 +08:00
Ke Sun
52e726eabc ci: add release notification workflow for merged PRs
- Add GitHub Actions workflow to notify on merged release branch PRs
- Implement HEAD sync check to ensure branch is up-to-date before notification
- Fetch commit messages from merged PR for AI summarization
- Integrate Alibaba Qwen AI to generate Chinese release summaries for QA team
- Send formatted Markdown notifications to WeChat webhook with PR details and AI summary
- Workflow triggers only on final PR merge to release branches to avoid duplicate notifications
2026-04-13 18:53:49 +08:00
lanceyq
9470dd2f1e refactor(memory): extract shared MemorySummary count query and replace magic number
- Move duplicated Neo4j MemorySummary count query into
  MemoryBaseService.get_valid_memory_summary_count()
- Introduce MIN_MEMORY_SUMMARY_COUNT constant to replace hardcoded 5
- Fix import ordering in implicit_emotions_storage_repository
- Use UTC consistently for date calculations (remove CST offset,
  datetime.now → datetime.utcnow)
2026-04-13 18:47:56 +08:00
lanceyq
ef8c7093b5 refactor(memory): use MemorySummary node count for implicit memory metrics
- Replace Statement-based implicit memory count (count/3) with actual
  MemorySummary node count filtered by DERIVED_FROM_STATEMENT relationship
- Add minimum threshold of 5 MemorySummary nodes before reporting data
- Add _build_empty_profile() to return structured empty profile when
  insufficient data exists, skipping unnecessary LLM calls
2026-04-13 18:32:43 +08:00
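
A sketch of the counting-and-gating idea from the two lanceyq commits above (the Cypher shape and return structure are assumptions):

MIN_MEMORY_SUMMARY_COUNT = 5  # replaces the hardcoded magic number

async def get_valid_memory_summary_count(connector, end_user_id: str) -> int:
    records = await connector.execute_query(
        """
        MATCH (m:MemorySummary)-[:DERIVED_FROM_STATEMENT]->()
        WHERE m.end_user_id = $end_user_id
        RETURN count(DISTINCT m) AS n
        """,
        end_user_id=end_user_id,
    )
    return records[0]["n"] if records else 0

def _build_empty_profile() -> dict:
    # Structured empty result so callers can skip the LLM call entirely.
    return {"profile": {}, "behavioral_hints": {}, "knowledge_tags": []}
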
yingzhao
05ea372776 Merge pull request #871 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
fix(web): third variable
2026-04-13 15:46:53 +08:00
zhaoying
2b067ce08a fix(web): third variable 2026-04-13 15:35:45 +08:00
山程漫悟
b63cff2993 Merge pull request #870 from SuanmoSuanyangTechnology/fix/Timebomb_030
fix(user)
2026-04-13 14:44:40 +08:00
Timebomb2018
5bb9ce9018 fix(user): add user retrieval regardless of active status and update DSL config enrichment
Added `get_user_by_id_regardless_active` in user repository to support activation/deactivation workflows, updated `user_service` to use it, and refactored `_enrich_release_config` in `app_dsl_service` to accept `default_model_config_id` as a parameter instead of reading from config dict.
2026-04-13 14:40:57 +08:00
yingzhao
aa581a9083 Merge pull request #869 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
Fix/v0.3.0 zy
2026-04-13 14:05:33 +08:00
zhaoying
ac51ccaf1f fix(web): ui fix 2026-04-13 14:04:31 +08:00
Ke Sun
bd955569b3 Merge pull request #868 from SuanmoSuanyangTechnology/fix/unique-parameter
refactor(neo4j): rename execute_query parameter from query to cypher
2026-04-13 13:54:25 +08:00
lanceyq
7a2a941ac4 refactor(neo4j): rename execute_query parameter from query to cypher
Improves readability by making the parameter name explicitly reflect
that it expects a Cypher query string rather than a generic query.
2026-04-13 13:47:59 +08:00
zhaoying
62355186ef fix(web): echarts grid 2026-04-13 13:38:10 +08:00
yingzhao
11ea486f82 Merge pull request #866 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
Fix/v0.3.0 zy
2026-04-13 12:20:06 +08:00
zhaoying
efdee32f85 fix(web): update chat variable defaultValue validate rule 2026-04-13 12:16:32 +08:00
zhaoying
988d101e93 fix(web): tool checklist 2026-04-13 12:12:49 +08:00
yingzhao
418f9f4dba Merge pull request #865 from SuanmoSuanyangTechnology/fix/v0.3.0_zy
Fix/v0.3.0 zy
2026-04-13 12:02:58 +08:00
zhaoying
520ee7c132 fix(web): sub node connected 2026-04-13 12:01:37 +08:00
zhaoying
2b52b32b96 fix(web): variable ui update 2026-04-13 11:36:14 +08:00
yingzhao
b8acc0a32f Merge pull request #864 from SuanmoSuanyangTechnology/feature/file_variable_zy
fix(web): i18n update
2026-04-13 10:24:17 +08:00
zhaoying
e1cf3bb3d2 fix(web): i18n update 2026-04-13 10:21:35 +08:00
yingzhao
6f66c9727f Merge pull request #863 from SuanmoSuanyangTechnology/feature/file_variable_zy
fix(web): stream loading
2026-04-10 18:57:43 +08:00
zhaoying
3beca641e1 fix(web): stream loading 2026-04-10 18:56:31 +08:00
Ke Sun
b8507a1df6 Merge pull request #843 from SuanmoSuanyangTechnology/feature/openclaw_lm
Feature/openclaw lm
2026-04-10 18:54:09 +08:00
miao
0f28d54c43 fix(tools): add get_required_config_parameters to OpenClawTool
Without this method, the tool status would show as available even when
server_url and api_key are not configured.
2026-04-10 18:47:31 +08:00
山程漫悟
4c2a1e6d1d Merge pull request #861 from SuanmoSuanyangTechnology/feature/agent-tool_xjn
feat(workflow)
2026-04-10 18:39:48 +08:00
Timebomb2018
7cfb6ace22 Merge branch 'refs/heads/develop' into feature/agent-tool_xjn 2026-04-10 18:33:39 +08:00
山程漫悟
91cc20d589 Merge pull request #857 from wanxunyang/feature/switch-app-version-for-shared-api-key-apps
feat: add versioned app chat API and fix release isolation bug
2026-04-10 18:33:03 +08:00
Timebomb2018
f01ca51896 Merge branch 'refs/heads/develop' into feature/agent-tool_xjn 2026-04-10 18:30:46 +08:00
Timebomb2018
f4a63f7d55 feat(workflow): support Dify features conversion and file variable migration 2026-04-10 18:30:12 +08:00
Ke Sun
0019f3acfd Merge pull request #860 from SuanmoSuanyangTechnology/hotfix/v0.2.10
Hotfix/v0.2.10
2026-04-10 18:29:38 +08:00
yingzhao
bc14c94407 Merge pull request #858 from SuanmoSuanyangTechnology/feature/file_variable_zy
Feature/file variable zy
2026-04-10 18:16:44 +08:00
zhaoying
a21dad70ed feat(web): workflow publish add check list validate 2026-04-10 18:13:58 +08:00
zhaoying
807a4e715d feat(web): app api add body parameter example 2026-04-10 18:11:09 +08:00
Ke Sun
58d18b476c Merge pull request #851 from SuanmoSuanyangTechnology/feat/extract-metadata
Feat/extract metadata
2026-04-10 18:11:04 +08:00
Ke Sun
5e5927a0b9 Merge pull request #852 from SuanmoSuanyangTechnology/fix/rag-num
fix: Remove "total"
2026-04-10 18:06:50 +08:00
wxy
7869121382 feat: add versioned app chat API and fix release isolation bug 2026-04-10 17:53:24 +08:00
zhaoying
7c0fb624d9 feat(web): workflow variable type 2026-04-10 17:34:38 +08:00
wxy
af83980f99 feat: add versioned app chat API and fix release isolation bug 2026-04-10 17:22:11 +08:00
山程漫悟
cf0d11208c Merge pull request #855 from wanxunyang/feature/switch-app-version-for-shared-api-key-apps
Feature/switch app version for shared api key apps
2026-04-10 16:36:06 +08:00
zhaoying
87d1630230 fix(web): hidden rag memory total 2026-04-10 16:33:27 +08:00
山程漫悟
50392384e7 Merge pull request #856 from SuanmoSuanyangTechnology/feature/agent-tool_xjn
feat(workflow)
2026-04-10 16:24:51 +08:00
zhaoying
9a926a8398 feat(web): workflow variable type 2026-04-10 16:24:36 +08:00
Timebomb2018
e5e6699168 feat(workflow): support nested variable access and DashScope rerank provider 2026-04-10 16:21:49 +08:00
Timebomb2018
068e2bfb7e fix(workflow): update output pattern to handle standalone curly braces 2026-04-10 15:24:18 +08:00
Timebomb2018
4ce6fede67 fix(workflow): update cycle graph node output type validation 2026-04-10 14:08:51 +08:00
miao
8497c955f9 fix(tools): make image_understand image_url optional and remove unused operation variable
Change image_url from required to optional in both operation_tool.py and
tool_service.py for image_understand operation, avoiding parameter validation
conflict with uploaded_files priority logic.
Remove unused operation variable from OpenClawTool.execute().
2026-04-10 13:31:09 +08:00
wxy
72fe3962cf feat(api): Support specifying app version for chat 2026-04-10 12:18:11 +08:00
wxy
c253968aa8 feat(api): Support specifying app version for chat 2026-04-10 12:10:24 +08:00
zhaoying
d517bceda2 fix(web): object/array[object] add format check 2026-04-10 12:03:02 +08:00
wxy
412183c359 feat(api): Support specifying app version for chat 2026-04-10 11:44:50 +08:00
wxy
90e8e90528 feat(api): Support specifying app version for chat 2026-04-10 11:11:39 +08:00
wxy
fd05c000f6 feat(api): Support specifying app version for chat 2026-04-10 11:04:59 +08:00
lanceyq
627d6a0381 fix: add comments 2026-04-10 10:43:43 +08:00
Ke Sun
807dee8460 Merge branch 'hotfix/v0.2.10' into develop 2026-04-10 10:16:39 +08:00
lanceyq
cd018814fe fix(memory): improve metadata language detection and clean_metadata logic
- Make MetadataExtractor language param optional (default None) to
  support auto-detection fallback when no language is explicitly set
- Refactor clean_metadata from walrus-operator dict comprehension to
  explicit loop for correctness and readability
2026-04-10 00:42:11 +08:00
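
The `clean_metadata` change is the classic comprehension-to-loop refactor; a hypothetical before/after sketch (the cleaning semantics are assumed):

def clean_metadata(raw: dict) -> dict:
    # Before (walrus-operator dict comprehension, compact but easy to misread):
    #   return {k: c for k, v in raw.items() if (c := str(v).strip())}
    # After: an explicit loop makes the drop-empty-values rule obvious.
    cleaned = {}
    for key, value in raw.items():
        value = str(value).strip() if value is not None else ""
        if value:  # drop empty or whitespace-only values
            cleaned[key] = value
    return cleaned
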
lanceyq
e0b7e95af6 refactor(memory): remove first-person pronoun replacement and inline metadata utils
- Remove _replace_first_person_with_user from StatementExtractor to preserve
  original user text for downstream metadata/alias extraction
- Delete metadata_utils.py module, inline clean_metadata into Celery task
- Remove unused imports and commented-out collect_user_raw_messages method
- Apply formatting cleanup across metadata models and extraction orchestrator
2026-04-10 00:29:18 +08:00
yingzhao
3a62d50048 Merge pull request #850 from SuanmoSuanyangTechnology/feature/tool_zy
feat(web): start/chat variable name cannot be duplicated
2026-04-09 22:43:55 +08:00
zhaoying
0e60da6d8a feat(web): start/chat variable name cannot be duplicated 2026-04-09 22:42:27 +08:00
yingzhao
39e94eb3ea Merge pull request #849 from SuanmoSuanyangTechnology/feature/tool_zy
Feature/tool zy
2026-04-09 22:31:32 +08:00
yingzhao
3e0f59adc6 Merge pull request #848 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
fix(web): deep_thinking reset
2026-04-09 22:30:26 +08:00
zhaoying
660cd2fadb fix(web): deep_thinking reset 2026-04-09 22:29:31 +08:00
zhaoying
6f1bb43eab fix(web): model list add query 2026-04-09 22:21:38 +08:00
yingzhao
61b5627505 Merge pull request #847 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
fix(web): agent knowledge
2026-04-09 22:15:08 +08:00
zhaoying
af6392fb09 Merge branch 'feature/ui_upgrade_zy' of github.com:SuanmoSuanyangTechnology/MemoryBear into feature/ui_upgrade_zy 2026-04-09 22:14:33 +08:00
zhaoying
84b1a95313 fix(web): iteration/loop toFront 2026-04-09 22:13:52 +08:00
zhaoying
8b21dab255 fix(web): agent knowledge 2026-04-09 22:09:32 +08:00
lanceyq
fc5ce63e44 fix: Remove "total" 2026-04-09 21:57:17 +08:00
lanceyq
15a863b41a feat(memory): unify alias extraction into metadata pipeline and deduplicate user entity nodes
- Merge alias add/remove into MetadataExtractionResponse and Celery metadata task,
  removing the separate sync step from extraction_orchestrator
- Replace first-person pronouns ("我") with "用户" in statement extraction to
  preserve identity semantics for downstream metadata/alias extraction
- Update extract_statement.jinja2 prompt to enforce "用户" as subject for user
  statements instead of resolving to real names
- Add alias change instructions (aliases_to_add/aliases_to_remove) to
  extract_user_metadata.jinja2 with incremental merge logic
- Deduplicate special entities ("用户", "AI助手") in graph_saver by reusing
  existing Neo4j node IDs per end_user_id
- Sync final aliases from PgSQL to Neo4j user entity nodes after metadata write
2026-04-09 21:55:59 +08:00
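
A minimal sketch of the incremental alias merge implied by `aliases_to_add`/`aliases_to_remove` (the function name and case-insensitive keying are assumptions):

def apply_alias_changes(current: list, to_add: list, to_remove: list) -> list:
    removed = {a.strip().lower() for a in to_remove}
    merged, seen = [], set()
    for alias in list(current) + list(to_add):
        key = alias.strip().lower()
        if not key or key in removed or key in seen:
            continue
        seen.add(key)
        merged.append(alias.strip())
    return merged
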
yingzhao
5226c5b79d Merge pull request #846 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
fix(web): remove port hover style
2026-04-09 21:32:54 +08:00
zhaoying
27e9f9968d fix(web): remove port hover style 2026-04-09 21:31:36 +08:00
yingzhao
d38612a10d Merge pull request #845 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
Feature/UI upgrade zy
2026-04-09 21:16:12 +08:00
zhaoying
32c71dcd89 Merge branch 'feature/ui_upgrade_zy' of github.com:SuanmoSuanyangTechnology/MemoryBear into feature/ui_upgrade_zy 2026-04-09 21:13:59 +08:00
zhaoying
428e7ebaa5 fix(web): agent knowledge bases config 2026-04-09 21:12:59 +08:00
yingzhao
57833689d9 Merge pull request #844 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
fix(web): edge connected event
2026-04-09 20:35:09 +08:00
zhaoying
384a67482c fix(web): edge connected event 2026-04-09 20:29:17 +08:00
miao
7842435321 fix(tools): forward set_runtime_context through OperationTool to base_tool
OperationTool wraps builtin tools for multi-operation support but did not
forward set_runtime_context, causing OpenClawTool to miss uploaded_files
and conversation_id when used with operation routing.
2026-04-09 20:01:07 +08:00
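
The fix is plain delegation; a sketch under assumptions (the stub base class is hypothetical; `OperationTool` and `set_runtime_context` are named in the commit):

class BaseToolStub:  # stand-in for the wrapped builtin tool
    def set_runtime_context(self, uploaded_files=None, conversation_id=None):
        self.uploaded_files = uploaded_files
        self.conversation_id = conversation_id

class OperationTool:
    def __init__(self, base_tool, operation: str):
        self.base_tool = base_tool
        self.operation = operation

    def set_runtime_context(self, **kwargs):
        # Previously missing: without forwarding, OpenClawTool never saw
        # uploaded_files or conversation_id under operation routing.
        self.base_tool.set_runtime_context(**kwargs)
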
zhaoying
33c4c5d31b feat(web): add file type chat variable 2026-04-09 19:45:57 +08:00
Timebomb2018
ca4f7aa65d refactor(rag/nlp): refactor reranking logic to apply post-deduplication and remove debug log 2026-04-09 19:35:43 +08:00
miao
b875626f18 fix(tools): revert CustomTool __init__ to upstream, remove redundant schema parsing
The _parse_openapi_schema() method already handles string-to-dict conversion internally, so the duplicate json.loads in __init__ was unnecessary.
2026-04-09 19:33:27 +08:00
Timebomb2018
130684cac0 refactor(rag/nlp): standardize knowledge graph retrieval to use DocumentChunk and add debug logging
The knowledge graph retrieval logic in `search.py` was updated to consistently return `DocumentChunk` instances instead of raw dictionaries, improving type safety and alignment with the RAG pipeline's expected data structure. Additionally, debug logging was enhanced in `draft_run_service.py` to log the full `retrieve_chunks_result` before extracting page content, aiding troubleshooting.
2026-04-09 19:07:53 +08:00
zhaoying
5adff38bda feat(web): workflow check list 2026-04-09 18:58:21 +08:00
Timebomb2018
62e0b2730b refactor(workflow/knowledge): update pattern matching to support multiple retrieve types 2026-04-09 18:29:08 +08:00
miao
55b2e05ba8 feat(tools): refactor migrate OpenClaw from custom tool to builtin tool
Create OpenClawTool class inheriting BuiltinTool with dedicated config
Remove all x-openclaw special handling from CustomTool (~270 lines)
Add multi-operation support (print_task, device_query, image_understand, general)
Change ensure_builtin_tools_initialized to incremental mode for auto-provisioning
Fix OperationTool and LangchainAdapter to support OpenClaw operation routing
2026-04-09 18:14:31 +08:00
miao
562ca6c1f1 fix(tools): fix OpenClaw connection test and multimodal format compatibility
- Use safe .get() for server URL to avoid KeyError
- Support both api_key and token in connection test auth
- Add OpenAI/Volcano image format (image_url) support
- Add aiohttp import in _test_openclaw_connection
2026-04-09 18:14:30 +08:00
miao
e298b38de9 feat(tools): add OpenClaw remote agent tool integration
- Detect x-openclaw flag in OpenAPI schema and init dedicated config
- Implement multimodal input/output (image download, compress, base64)
- Add OpenClaw connection test and status validation in tool service
- Fix auth_config token check to support both api_key and bearer_token
- Inject runtime context (user_id, conversation_id, files) in chat services
2026-04-09 18:14:29 +08:00
Timebomb2018
a7b8ba0c66 fix(rag): fix pdfplumber concurrency issue and add debug logging
The pdfplumber parser now uses a global lock to prevent concurrent access issues during PDF image rendering. Additionally, added a warning log to trace knowledge retrieval results for debugging purposes. The syntax fix in knowledge node's match case ensures correct pattern matching behavior.

BREAKING CHANGE: The pdfplumber parser now requires LOCK_KEY_pdfplumber to be defined in sys.modules for thread safety.

Closes #841
2026-04-09 17:48:16 +08:00
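
The BREAKING CHANGE note implies a process-wide lock stashed in `sys.modules`; a sketch of that convention (the helper names are assumptions):

import sys
import threading

LOCK_KEY_pdfplumber = "LOCK_KEY_pdfplumber"

def get_pdfplumber_lock() -> threading.Lock:
    # sys.modules is shared per process, so every importer gets the same lock.
    if LOCK_KEY_pdfplumber not in sys.modules:
        sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
    return sys.modules[LOCK_KEY_pdfplumber]

def render_page_image(page):
    # pdfplumber image rendering is not safe under concurrent access.
    with get_pdfplumber_lock():
        return page.to_image()
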
yingzhao
460c86cd94 Merge pull request #842 from SuanmoSuanyangTechnology/feature/tool_zy
fix(web): if-else node case show
2026-04-09 17:46:52 +08:00
zhaoying
33a1c178ff fix(web): if-else node case show 2026-04-09 17:45:42 +08:00
yingzhao
c81612e6d3 Merge pull request #841 from SuanmoSuanyangTechnology/feature/tool_zy
feat(web): add OpenClawTool
2026-04-09 17:39:26 +08:00
zhaoying
9f9ac69f97 feat(web): add OpenClawTool 2026-04-09 17:38:35 +08:00
yingzhao
0516822d42 Merge pull request #840 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
fix(web): if-else/question-classifier add node front
2026-04-09 16:37:52 +08:00
zhaoying
b598171a3d fix(web): if-else/question-classifier add node front 2026-04-09 16:34:04 +08:00
yingzhao
a4ea7f0385 Merge pull request #839 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
fix(web): port add node front
2026-04-09 16:15:39 +08:00
zhaoying
32ae60fc65 fix(web): port add node front 2026-04-09 16:14:24 +08:00
yingzhao
6b272c5b44 Merge pull request #838 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
Feature/UI upgrade zy
2026-04-09 15:29:43 +08:00
zhaoying
2782d0661f fix(web): retrieve types add graph 2026-04-09 15:28:45 +08:00
Timebomb2018
ea2f5e61c9 fix(tool): strip input_value in datetime_to_timestamp to prevent whitespace-related parsing errors 2026-04-09 15:18:39 +08:00
Timebomb2018
5975d70bf9 feat(tool): add datetime_to_timestamp operation with timezone support 2026-04-09 15:14:15 +08:00
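
A sketch combining the two `datetime_to_timestamp` commits above: strip stray whitespace, parse, apply an optional timezone (parameter names are assumptions):

from datetime import datetime
from zoneinfo import ZoneInfo

def datetime_to_timestamp(input_value: str, timezone: str = "UTC") -> int:
    dt = datetime.fromisoformat(input_value.strip())  # strip() avoids whitespace parse errors
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=ZoneInfo(timezone))
    return int(dt.timestamp())

# e.g. datetime_to_timestamp(" 2026-04-15 12:00:00 ", "Asia/Shanghai")
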
lanceyq
e0546e01ef refactor(memory): delegate metadata merging to LLM instead of code-based merge
- Remove merge_metadata and its helper functions from metadata_utils.py
- Pass existing_metadata to MetadataExtractor.extract_metadata() as LLM context
- Add merge instructions to extract_user_metadata.jinja2 prompt (zh/en)
- Update Celery task to read existing metadata before extraction and overwrite
- Simplify field descriptions in UserMetadataProfile model
- Add _update_timestamps helper to track changed fields
2026-04-09 15:10:29 +08:00
Timebomb2018
70aab94fc3 feat(knowledge): support graph retrieval type with dynamic API key selection 2026-04-09 15:00:49 +08:00
zhaoying
b7c1ce261b fix(web): remove tooltip 2026-04-09 13:43:47 +08:00
yingzhao
edac6a164e Merge pull request #836 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
fix(web): editor init
2026-04-09 12:33:33 +08:00
zhaoying
1503b242ea fix(web): editor init 2026-04-09 12:32:24 +08:00
yingzhao
18fd48505d Merge pull request #835 from SuanmoSuanyangTechnology/feature/ui_upgrade_zy
Feature/UI upgrade zy
2026-04-09 11:50:58 +08:00
zhaoying
807ddce5cd fix(web): remove editor variable space 2026-04-09 11:50:03 +08:00
zhaoying
62fb6c79a0 fix(web): change pdf enhancement method init value 2026-04-09 11:47:23 +08:00
zhaoying
cc373b2864 fix(web): loop/iteration edge 2026-04-09 11:45:41 +08:00
lanceyq
f2d7479229 feat(memory): add async user metadata extraction pipeline
- Add MetadataExtractor to collect user-related statements post-dedup
  and extract profile/behavioral metadata via independent LLM call
- Add Celery task (extract_user_metadata) routed to memory_tasks queue
- Add metadata models (UserMetadata, UserMetadataProfile, etc.)
- Add metadata utility functions (clean, validate, merge with _op support)
- Add Jinja2 prompt template for metadata extraction (zh/en)
- Fix Lucene query parameter naming: rename `q` to `query` across all
  Cypher queries, graph_search functions, and callers
- Escape `/` in Lucene queries to prevent TokenMgrError
- Add `speaker` field to ChunkNode and persist it in Neo4j
- Remove unused imports (argparse, os, UUID) in search.py
- Fix unnecessary db context nesting in interest distribution task
2026-04-09 11:01:56 +08:00
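
On the Lucene point: `/` is a special character in Lucene query syntax, so left unescaped it aborts parsing with TokenMgrError. A sketch of the escaping (the exact character set used in the repo is an assumption):

import re

def escape_lucene(query: str) -> str:
    # Backslash-escape Lucene specials, including '/', which otherwise
    # raises TokenMgrError in the full-text index.
    return re.sub(r'([+\-&|!(){}\[\]^"~*?:\\/])', r'\\\1', query)
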
Ke Sun
ae1909b7e9 Merge pull request #833 from SuanmoSuanyangTechnology/release/v0.2.10
Release/v0.2.10
2026-04-08 21:45:35 +08:00
Mark
e817cfd292 Merge pull request #797 from SuanmoSuanyangTechnology/revert-796-feat/app-log-wxy
Revert "fix(workflow): restore opening statement and citation in shared conversations"
2026-04-07 17:12:49 +08:00
Mark
e48b146e60 Revert "fix(workflow): restore opening statement and citation in shared conversations" 2026-04-07 17:11:45 +08:00
Mark
07b66a9801 Merge pull request #796 from wanxunyang/feat/app-log-wxy
fix(workflow): restore opening statement and citation in shared conversations
2026-04-07 17:10:56 +08:00
wxy
cd8229f370 fix(workflow): restore opening statement and citation display in shared workflows 2026-04-07 15:57:09 +08:00
Ke Sun
cfbf83f71e Merge pull request #787 from SuanmoSuanyangTechnology/fix/atomic-update
fix(memory): improve optimistic lock resilience in access history man…
2026-04-07 10:57:20 +08:00
lanceyq
99862db7a0 refactor(forgetting-engine): replace optimistic locking with APOC atomic operations in access history manager
- Replace version-based optimistic locking and retry loop with apoc.atomic.add/insert for concurrent safety
- Merge duplicate accesses within a batch before updating (access_count_delta)
- Simplify _calculate_update to only compute on new timestamps instead of full history rebuild
- Remove max_retries instance variable (kept as param for backward compat)
- Trim verbose docstrings and inline comments
2026-04-03 18:40:03 +08:00
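
A sketch of the APOC-based update (the Cypher shape is an assumption; `apoc.atomic.add` performs the increment server-side, which is what removes the version-check/retry loop):

APOC_ACCESS_UPDATE = """
MATCH (n) WHERE n.uuid = $node_id
CALL apoc.atomic.add(n, 'access_count', $access_count_delta) YIELD newValue
RETURN newValue
"""

async def record_access(connector, node_id: str, access_count_delta: int = 1):
    # Duplicate accesses within a batch should be merged into one delta first.
    return await connector.execute_query(
        APOC_ACCESS_UPDATE, node_id=node_id, access_count_delta=access_count_delta
    )
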
lanceyq
00a8099857 changes:(api) Change the "jitter" to "tremble". 2026-04-03 16:55:53 +08:00
lanceyq
117e29fbe3 fix(memory): improve optimistic lock resilience in access history manager
- Increase max_retries from 3 to 5 for concurrent conflict recovery
- Add randomized exponential backoff between retries to reduce contention
- Merge duplicate node accesses in batch operations to avoid self-conflicts
- Support access_times parameter for merged batch access counting
- Add Community node label support in atomic update content field map
2026-04-03 16:46:09 +08:00
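
For the earlier optimistic-locking variant, the retry pattern looks roughly like this (a sketch; the randomized jitter keeps concurrent writers from re-colliding on the same schedule):

import random
import time

def with_optimistic_retry(update_fn, max_retries: int = 5) -> bool:
    for attempt in range(max_retries):
        if update_fn():  # returns True when the versioned update applied cleanly
            return True
        # Randomized exponential backoff before the next attempt.
        time.sleep((2 ** attempt) * 0.05 * (0.5 + random.random()))
    return False
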
143 changed files with 4045 additions and 1256 deletions

View File

@@ -0,0 +1,157 @@
name: Release Notify Workflow
on:
pull_request:
types: [closed]
jobs:
notify:
if: >
github.event.pull_request.merged == true &&
startsWith(github.event.pull_request.base.ref, 'release')
runs-on: ubuntu-latest
steps:
# Guard against GitHub's HEAD not being synced yet
- run: sleep 3
# 1️⃣ Fetch the branch HEAD
- name: Get HEAD
id: head
run: |
HEAD_SHA=$(curl -s \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
https://api.github.com/repos/${{ github.repository }}/git/ref/heads/${{ github.event.pull_request.base.ref }} \
| jq -r '.object.sha')
echo "head_sha=$HEAD_SHA" >> $GITHUB_OUTPUT
# 2️⃣ Check whether this is the final PR
- name: Check Latest
id: check
run: |
if [ "${{ github.event.pull_request.merge_commit_sha }}" = "${{ steps.head.outputs.head_sha }}" ]; then
echo "ok=true" >> $GITHUB_OUTPUT
else
echo "ok=false" >> $GITHUB_OUTPUT
fi
# 3️⃣ Try to extract the Sourcery summary from the PR body
- name: Extract Sourcery Summary
if: steps.check.outputs.ok == 'true'
id: sourcery
env:
PR_BODY: ${{ github.event.pull_request.body }}
run: |
python3 << 'PYEOF'
import os, re
body = os.environ.get("PR_BODY", "") or ""
match = re.search(
r"## Summary by Sourcery\s*\n(.*?)(?=\n## |\Z)",
body,
re.DOTALL
)
if match:
summary = match.group(1).strip()
found = "true"
else:
summary = ""
found = "false"
with open("sourcery_summary.txt", "w", encoding="utf-8") as f:
f.write(summary)
with open(os.environ["GITHUB_OUTPUT"], "a") as gh:
gh.write(f"found={found}\n")
gh.write("summary<<EOF\n")
gh.write(summary + "\n")
gh.write("EOF\n")
PYEOF
# 4️⃣ Fallback: fetch commits + Qwen (通义千问) summary
- name: Get Commits
if: steps.check.outputs.ok == 'true' && steps.sourcery.outputs.found == 'false'
run: |
curl -s \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
${{ github.event.pull_request.commits_url }} \
| jq -r '.[].commit.message' | head -n 20 > commits.txt
- name: AI Summary (Qwen Fallback)
if: steps.check.outputs.ok == 'true' && steps.sourcery.outputs.found == 'false'
id: qwen
env:
DASHSCOPE_API_KEY: ${{ secrets.DASHSCOPE_API_KEY }}
run: |
python3 << 'PYEOF'
import json, os, urllib.request
with open("commits.txt", "r") as f:
commits = f.read().strip()
prompt = "请用中文总结以下代码提交输出3-5条要点面向测试人员。直接输出编号列表不要输出标题或前言\n" + commits
payload = {"model": "qwen-plus", "input": {"prompt": prompt}}
data = json.dumps(payload, ensure_ascii=False).encode("utf-8")
req = urllib.request.Request(
"https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation",
data=data,
headers={
"Authorization": "Bearer " + os.environ["DASHSCOPE_API_KEY"],
"Content-Type": "application/json"
}
)
resp = urllib.request.urlopen(req)
result = json.loads(resp.read().decode())
summary = result.get("output", {}).get("text", "AI 摘要生成失败")
with open(os.environ["GITHUB_OUTPUT"], "a") as gh:
gh.write("summary<<EOF\n")
gh.write(summary + "\n")
gh.write("EOF\n")
PYEOF
# 5️⃣ WeChat Work notification (Markdown)
- name: Notify WeChat
if: steps.check.outputs.ok == 'true'
env:
WECHAT_WEBHOOK: ${{ secrets.WECHAT_WEBHOOK }}
BRANCH: ${{ github.event.pull_request.base.ref }}
AUTHOR: ${{ github.event.pull_request.user.login }}
PR_TITLE: ${{ github.event.pull_request.title }}
PR_URL: ${{ github.event.pull_request.html_url }}
SOURCERY_FOUND: ${{ steps.sourcery.outputs.found }}
SOURCERY_SUMMARY: ${{ steps.sourcery.outputs.summary }}
QWEN_SUMMARY: ${{ steps.qwen.outputs.summary }}
run: |
python3 << 'PYEOF'
import json, os, urllib.request
if os.environ.get("SOURCERY_FOUND") == "true":
label = "Summary by Sourcery"
summary = os.environ.get("SOURCERY_SUMMARY", "")
else:
label = "AI变更摘要"
summary = os.environ.get("QWEN_SUMMARY", "AI 摘要生成失败")
content = (
"## 🚀 Release 发布通知\n"
"> 📦 **分支**: " + os.environ["BRANCH"] + "\n"
"> 👤 **提交人**: " + os.environ["AUTHOR"] + "\n"
"> 📝 **标题**: " + os.environ["PR_TITLE"] + "\n\n"
"### 🧠 " + label + "\n" +
summary + "\n\n"
"---\n"
"🔗 [查看PR详情](" + os.environ["PR_URL"] + ")"
)
payload = {"msgtype": "markdown", "markdown": {"content": content}}
data = json.dumps(payload, ensure_ascii=False).encode("utf-8")
req = urllib.request.Request(
os.environ["WECHAT_WEBHOOK"],
data=data,
headers={"Content-Type": "application/json"}
)
resp = urllib.request.urlopen(req)
print(resp.read().decode())
PYEOF

.gitignore
View File

@@ -27,6 +27,7 @@ time.log
celerybeat-schedule.db
search_results.json
redbear-mem-metrics/
redbear-mem-benchmark/
pitch-deck/
api/migrations/versions

View File

@@ -111,6 +111,9 @@ celery_app.conf.update(
# Clustering tasks → memory_tasks queue (same worker, avoiding the macOS fork issue)
'app.tasks.run_incremental_clustering': {'queue': 'memory_tasks'},
# Metadata extraction → memory_tasks queue
'app.tasks.extract_user_metadata': {'queue': 'memory_tasks'},
# Document tasks → document_tasks queue (prefork worker)
'app.core.rag.tasks.parse_document': {'queue': 'document_tasks'},
'app.core.rag.tasks.build_graphrag_for_kb': {'queue': 'document_tasks'},

View File

@@ -136,7 +136,7 @@ async def refresh_token(
# Check that the user exists
user = auth_service.get_user_by_id(db, userId)
if not user:
raise BusinessException(t("auth.user.not_found"), code=BizCode.USER_NOT_FOUND)
raise BusinessException(t("auth.user.not_found"), code=BizCode.USER_NO_ACCESS)
# Check the refresh-token blacklist
if settings.ENABLE_SINGLE_SESSION:

View File

@@ -23,6 +23,7 @@ from app.models.user_model import User
from app.schemas import chunk_schema
from app.schemas.response_schema import ApiResponse
from app.services import knowledge_service, document_service, file_service, knowledgeshare_service
from app.services.model_service import ModelApiKeyService
# Obtain a dedicated API logger
api_logger = get_api_logger()
@@ -460,18 +461,20 @@ async def retrieve_chunks(
if retrieve_data.retrieve_type == chunk_schema.RetrieveType.Graph:
kb_ids = [str(kb_id) for kb_id in private_kb_ids]
workspace_ids = [str(workspace_id) for workspace_id in private_workspace_ids]
llm_key = ModelApiKeyService.get_available_api_key(db, db_knowledge.llm_id)
emb_key = ModelApiKeyService.get_available_api_key(db, db_knowledge.embedding_id)
# Prepare the chat_mdl, embedding_model and vision_model configuration
chat_model = Base(
key=db_knowledge.llm.api_keys[0].api_key,
model_name=db_knowledge.llm.api_keys[0].model_name,
base_url=db_knowledge.llm.api_keys[0].api_base
key=llm_key.api_key,
model_name=llm_key.model_name,
base_url=llm_key.api_base
)
embedding_model = OpenAIEmbed(
key=db_knowledge.embedding.api_keys[0].api_key,
model_name=db_knowledge.embedding.api_keys[0].model_name,
base_url=db_knowledge.embedding.api_keys[0].api_base
key=emb_key.api_key,
model_name=emb_key.model_name,
base_url=emb_key.api_base
)
doc = kg_retriever.retrieval(question=retrieve_data.query, workspace_ids=workspace_ids, kb_ids= kb_ids, emb_mdl=embedding_model, llm=chat_model)
doc = kg_retriever.retrieval(question=retrieve_data.query, workspace_ids=workspace_ids, kb_ids=kb_ids, emb_mdl=embedding_model, llm=chat_model)
if doc:
rs.insert(0, doc)
return success(data=jsonable_encoder(rs), msg="retrieval successful")

View File

@@ -124,10 +124,11 @@ async def get_prompt_opt(
skill=data.skill
):
# chunk is the incremental content of the prompt
yield f"event:message\ndata: {json.dumps(chunk)}\n\n"
yield f"event:message\ndata: {json.dumps(chunk, ensure_ascii=False)}\n\n"
except Exception as e:
yield f"event:error\ndata: {json.dumps(
{"error": str(e)}
{"error": str(e)},
ensure_ascii=False
)}\n\n"
yield "event:end\ndata: {}\n\n"

View File

@@ -14,6 +14,7 @@ from app.core.response_utils import success
from app.db import get_db
from app.models.app_model import App
from app.models.app_model import AppType
from app.models.app_release_model import AppRelease
from app.repositories import knowledge_repository
from app.repositories.end_user_repository import EndUserRepository
from app.schemas import AppChatRequest, conversation_schema
@@ -61,18 +62,18 @@ async def list_apps():
# return success(data={"received": True}, msg="消息已接收")
def _checkAppConfig(app: App):
if app.type == AppType.AGENT:
if not app.current_release.config:
def _checkAppConfig(release: AppRelease):
if release.type == AppType.AGENT:
if not release.config:
raise BusinessException("Agent 应用未配置模型", BizCode.AGENT_CONFIG_MISSING)
elif app.type == AppType.MULTI_AGENT:
if not app.current_release.config:
elif release.type == AppType.MULTI_AGENT:
if not release.config:
raise BusinessException("Multi-Agent 应用未配置模型", BizCode.AGENT_CONFIG_MISSING)
elif app.type == AppType.WORKFLOW:
if not app.current_release.config:
elif release.type == AppType.WORKFLOW:
if not release.config:
raise BusinessException("工作流应用未配置模型", BizCode.AGENT_CONFIG_MISSING)
else:
raise BusinessException("不支持的应用类型", BizCode.AGENT_CONFIG_MISSING)
raise BusinessException("不支持的应用类型", BizCode.APP_TYPE_NOT_SUPPORTED)
@router.post("/chat")
@@ -86,10 +87,22 @@ async def chat(
app_service: Annotated[AppService, Depends(get_app_service)] = None,
message: str = Body(..., description="聊天消息内容"),
):
"""
Agent/Workflow chat endpoint
- No version passed: use the currently active release (current_release; after a rollback, the rollback target release)
- version=release_id passed: use the historical snapshot of that release (UUID), e.g. {"version": "{{release_id}}"}
"""
body = await request.json()
payload = AppChatRequest(**body)
app = app_service.get_app(api_key_auth.resource_id, api_key_auth.workspace_id)
# Version switching: when a release_id is given, look up the matching historical snapshot; otherwise use the currently active release
if payload.version is not None:
active_release = app_service.get_release_by_id(app.id, payload.version)
else:
active_release = app.current_release
other_id = payload.user_id
workspace_id = api_key_auth.workspace_id
end_user_repo = EndUserRepository(db)
@@ -127,7 +140,7 @@ async def chat(
storage_type = 'neo4j'
app_type = app.type
# check app config
_checkAppConfig(app)
_checkAppConfig(active_release)
# Get or create the conversation (validated up front)
conversation = conversation_service.create_or_get_conversation(
@@ -142,7 +155,7 @@ async def chat(
# print("="*50)
# print(app.current_release.default_model_config_id)
agent_config = agent_config_4_app_release(app.current_release)
agent_config = agent_config_4_app_release(active_release)
# print(agent_config.default_model_config_id)
# Thinking switch: enabled only when the agent has deep_thinking configured and the request sets thinking=True
@@ -194,7 +207,7 @@ async def chat(
return success(data=conversation_schema.ChatResponse(**result).model_dump(mode="json"))
elif app_type == AppType.MULTI_AGENT:
# Multi-agent streaming response
config = multi_agent_config_4_app_release(app.current_release)
config = multi_agent_config_4_app_release(active_release)
if payload.stream:
async def event_generator():
async for event in app_chat_service.multi_agent_chat_stream(
@@ -237,7 +250,7 @@ async def chat(
return success(data=conversation_schema.ChatResponse(**result).model_dump(mode="json"))
elif app_type == AppType.WORKFLOW:
# Workflow streaming response
config = workflow_config_4_app_release(app.current_release)
config = workflow_config_4_app_release(active_release)
if payload.stream:
async def event_generator():
async for event in app_chat_service.workflow_chat_stream(
@@ -253,7 +266,7 @@ async def chat(
user_rag_memory_id=user_rag_memory_id,
app_id=app.id,
workspace_id=workspace_id,
release_id=app.current_release.id,
release_id=active_release.id,
public=True
):
event_type = event.get("event", "message")
@@ -288,7 +301,7 @@ async def chat(
files=payload.files,
app_id=app.id,
workspace_id=workspace_id,
release_id=app.current_release.id
release_id=active_release.id
)
logger.debug(
"工作流试运行返回结果",
@@ -302,6 +315,4 @@ async def chat(
msg="工作流任务执行成功"
)
else:
from app.core.exceptions import BusinessException
from app.core.error_codes import BizCode
raise BusinessException(f"不支持的应用类型: {app_type}", BizCode.APP_TYPE_NOT_SUPPORTED)

View File

@@ -41,6 +41,7 @@ class BizCode(IntEnum):
FILE_NOT_FOUND = 4006
APP_NOT_FOUND = 4007
RELEASE_NOT_FOUND = 4008
USER_NO_ACCESS = 4009
# Conflict/state (5xxx)
DUPLICATE_NAME = 5001
@@ -118,6 +119,7 @@ HTTP_MAPPING = {
BizCode.WORKSPACE_ACCESS_DENIED: 403,
BizCode.NOT_FOUND: 400,
BizCode.USER_NOT_FOUND: 200,
BizCode.USER_NO_ACCESS: 401,
BizCode.WORKSPACE_NOT_FOUND: 400,
BizCode.MODEL_NOT_FOUND: 400,
BizCode.KNOWLEDGE_NOT_FOUND: 400,

View File

@@ -153,7 +153,7 @@ class PerceptualSearchService:
return []
try:
r = await search_perceptual(
connector=connector, q=escaped,
connector=connector, query=escaped,
end_user_id=self.end_user_id,
limit=limit * 5, # 多查一些以提高命中率
)
@@ -178,7 +178,7 @@ class PerceptualSearchService:
if not escaped.strip():
return []
r = await search_perceptual(
connector=connector, q=escaped,
connector=connector, query=escaped,
end_user_id=self.end_user_id, limit=limit,
)
return r.get("perceptuals", [])

View File

@@ -14,6 +14,7 @@ from dotenv import load_dotenv
from app.core.logging_config import get_agent_logger
from app.core.memory.agent.utils.get_dialogs import get_chunked_dialogs
from app.core.memory.storage_services.extraction_engine.deduplication.deduped_and_disamb import _USER_PLACEHOLDER_NAMES
from app.core.memory.storage_services.extraction_engine.extraction_orchestrator import ExtractionOrchestrator
from app.core.memory.storage_services.extraction_engine.knowledge_extraction.memory_summary import \
memory_summary_generation
@@ -191,15 +192,37 @@ async def write(
if success:
logger.info("Successfully saved all data to Neo4j")
# Trigger clustering via an async Celery task (does not block the main flow)
if all_entity_nodes:
end_user_id = all_entity_nodes[0].end_user_id
# After the Neo4j write completes, overwrite Neo4j user entities with the authoritative PgSQL aliases
try:
from app.repositories.end_user_info_repository import EndUserInfoRepository
if end_user_id:
with get_db_context() as db_session:
info = EndUserInfoRepository(db_session).get_by_end_user_id(uuid.UUID(end_user_id))
pg_aliases = info.aliases if info and info.aliases else []
if info is not None:
# Pass the Python-side placeholder-name set in as a parameter, avoiding hardcoding in Cypher
placeholder_names = list(_USER_PLACEHOLDER_NAMES)
await neo4j_connector.execute_query(
"""
MATCH (e:ExtractedEntity)
WHERE e.end_user_id = $end_user_id AND toLower(e.name) IN $placeholder_names
SET e.aliases = $aliases
""",
end_user_id=end_user_id, aliases=pg_aliases,
placeholder_names=placeholder_names,
)
logger.info(f"[AliasSync] Neo4j 用户实体 aliases 已用 PgSQL 权威源覆盖: {pg_aliases}")
except Exception as sync_err:
logger.warning(f"[AliasSync] PgSQL→Neo4j aliases 同步失败(不影响主流程): {sync_err}")
# Trigger clustering via an async Celery task (does not block the main flow)
try:
from app.tasks import run_incremental_clustering
end_user_id = all_entity_nodes[0].end_user_id
new_entity_ids = [e.id for e in all_entity_nodes]
# Submit the Celery task asynchronously
task = run_incremental_clustering.apply_async(
kwargs={
"end_user_id": end_user_id,
@@ -207,7 +230,6 @@ async def write(
"llm_model_id": str(memory_config.llm_model_id) if memory_config.llm_model_id else None,
"embedding_model_id": str(memory_config.embedding_model_id) if memory_config.embedding_model_id else None,
},
# Set the task priority (low, so it does not affect the main business flow)
priority=3,
)
logger.info(
@@ -215,7 +237,6 @@ async def write(
f"task_id={task.id}, end_user_id={end_user_id}, entity_count={len(new_entity_ids)}"
)
except Exception as e:
# A failed clustering-task submission does not affect the main flow
logger.error(f"[Clustering] 提交聚类任务失败(不影响主流程): {e}", exc_info=True)
break

View File

@@ -58,6 +58,14 @@ from app.core.memory.models.triplet_models import (
TripletExtractionResponse,
)
# User metadata models
from app.core.memory.models.metadata_models import (
UserMetadata,
UserMetadataBehavioralHints,
UserMetadataProfile,
MetadataExtractionResponse,
)
# Ontology scenario models (LLM extracted from scenarios)
from app.core.memory.models.ontology_scenario_models import (
OntologyClass,
@@ -124,6 +132,10 @@ __all__ = [
"Entity",
"Triplet",
"TripletExtractionResponse",
"UserMetadata",
"UserMetadataBehavioralHints",
"UserMetadataProfile",
"MetadataExtractionResponse",
# Ontology models
"OntologyClass",
"OntologyExtractionResponse",

View File

@@ -364,12 +364,14 @@ class ChunkNode(Node):
Attributes:
dialog_id: ID of the parent dialog
content: The text content of the chunk
speaker: Speaker identifier ('user' or 'assistant')
chunk_embedding: Optional embedding vector for the chunk
sequence_number: Order of this chunk within the dialog
metadata: Additional chunk metadata as key-value pairs
"""
dialog_id: str = Field(..., description="ID of the parent dialog")
content: str = Field(..., description="The text content of the chunk")
speaker: Optional[str] = Field(None, description="Speaker identifier: 'user' for user messages, 'assistant' for AI responses")
chunk_embedding: Optional[List[float]] = Field(None, description="Chunk embedding vector")
sequence_number: int = Field(..., description="Order of this chunk within the dialog")
metadata: dict = Field(default_factory=dict, description="Additional chunk metadata")

View File

@@ -0,0 +1,57 @@
"""Models for user metadata extraction.
Independent from triplet_models.py - these models are used by the
standalone metadata extraction pipeline (post-dedup async Celery task).
"""
from typing import List
from pydantic import BaseModel, ConfigDict, Field
class UserMetadataProfile(BaseModel):
"""用户画像信息"""
model_config = ConfigDict(extra="ignore")
role: str = Field(default="", description="用户职业或角色")
domain: str = Field(default="", description="用户所在领域")
expertise: List[str] = Field(
default_factory=list, description="用户擅长的技能或工具"
)
interests: List[str] = Field(
default_factory=list, description="用户关注的话题或领域标签"
)
class UserMetadataBehavioralHints(BaseModel):
"""行为偏好"""
model_config = ConfigDict(extra="ignore")
learning_stage: str = Field(default="", description="学习阶段")
preferred_depth: str = Field(default="", description="偏好深度")
tone_preference: str = Field(default="", description="语气偏好")
class UserMetadata(BaseModel):
"""用户元数据顶层结构"""
model_config = ConfigDict(extra="ignore")
profile: UserMetadataProfile = Field(default_factory=UserMetadataProfile)
behavioral_hints: UserMetadataBehavioralHints = Field(
default_factory=UserMetadataBehavioralHints
)
knowledge_tags: List[str] = Field(default_factory=list, description="知识标签")
class MetadataExtractionResponse(BaseModel):
"""元数据提取 LLM 响应结构"""
model_config = ConfigDict(extra="ignore")
user_metadata: UserMetadata = Field(default_factory=UserMetadata)
aliases_to_add: List[str] = Field(
default_factory=list,
description="本次新发现的用户别名(用户自我介绍或他人对用户的称呼)",
)
aliases_to_remove: List[str] = Field(
default_factory=list, description="用户明确否认的别名(如'我不叫XX了')"
)

View File

@@ -1,4 +1,3 @@
import argparse
import asyncio
import json
import math
@@ -6,7 +5,6 @@ import os
import time
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from uuid import UUID
if TYPE_CHECKING:
from app.schemas.memory_config_schema import MemoryConfig
@@ -23,7 +21,7 @@ from app.core.memory.utils.config.config_utils import (
)
from app.core.memory.utils.data.text_utils import extract_plain_query
from app.core.memory.utils.data.time_utils import normalize_date_safe
from app.core.memory.utils.llm.llm_utils import get_reranker_client
# from app.core.memory.utils.llm.llm_utils import get_reranker_client
from app.core.models.base import RedBearModelConfig
from app.db import get_db_context
from app.repositories.neo4j.graph_search import (
@@ -748,11 +746,10 @@ async def run_hybrid_search(
if search_type in ["keyword", "hybrid"]:
# Keyword-based search
logger.info("[PERF] Starting keyword search...")
keyword_start = time.time()
keyword_task = asyncio.create_task(
search_graph(
connector=connector,
q=query_text,
query=query_text,
end_user_id=end_user_id,
limit=limit,
include=include
@@ -762,7 +759,6 @@ async def run_hybrid_search(
if search_type in ["embedding", "hybrid"]:
# Embedding-based search
logger.info("[PERF] Starting embedding search...")
embedding_start = time.time()
# Load the embedder config from the database (by ID) and build a RedBearModelConfig
config_load_start = time.time()
@@ -904,10 +900,10 @@ async def run_hybrid_search(
else:
results["latency_metrics"] = latency_metrics
logger.info(f"[PERF] ===== SEARCH PERFORMANCE SUMMARY =====")
logger.info("[PERF] ===== SEARCH PERFORMANCE SUMMARY =====")
logger.info(f"[PERF] Total search completed in {total_latency:.4f}s")
logger.info(f"[PERF] Latency breakdown: {json.dumps(latency_metrics, indent=2)}")
logger.info(f"[PERF] =========================================")
logger.info("[PERF] =========================================")
# Sanitize results: drop large/unused fields
_remove_keys_recursive(results, ["name_embedding"]) # drop entity name embeddings from outputs

View File

@@ -82,51 +82,38 @@ def _merge_attribute(canonical: ExtractedEntityNode, ent: ExtractedEntityNode):
canonical.connect_strength = next(iter(pair))
# Alias merging (dedupe while preserving order, using the normalization utils)
# User-entity aliases are owned solely by PgSQL end_user_info; do not modify them while dedup-merging
try:
canonical_name = (getattr(canonical, "name", "") or "").strip()
incoming_name = (getattr(ent, "name", "") or "").strip()
# Collect all aliases that need merging
all_aliases = []
# 1. Add canonical's existing aliases
existing = getattr(canonical, "aliases", []) or []
all_aliases.extend(existing)
# 2. Add the incoming entity's name (if it differs from canonical's name)
if incoming_name and incoming_name != canonical_name:
all_aliases.append(incoming_name)
# 3. Add all of the incoming entity's aliases
incoming = getattr(ent, "aliases", []) or []
all_aliases.extend(incoming)
# 4. Normalize and dedupe (preferring the alias_utils helpers)
try:
from app.core.memory.utils.alias_utils import normalize_aliases
canonical.aliases = normalize_aliases(canonical_name, all_aliases)
except Exception:
# Fall back to the enhanced dedup logic if the import fails
seen_normalized = set()
unique_aliases = []
if canonical_name.lower() not in _USER_PLACEHOLDER_NAMES:
incoming_name = (getattr(ent, "name", "") or "").strip()
for alias in all_aliases:
if not alias:
continue
alias_stripped = str(alias).strip()
if not alias_stripped or alias_stripped == canonical_name:
continue
# Normalize: lowercase for the dedup check
alias_normalized = alias_stripped.lower()
if alias_normalized not in seen_normalized:
seen_normalized.add(alias_normalized)
unique_aliases.append(alias_stripped)
# Collect all aliases that need merging, filtering out user placeholder names to avoid polluting non-user entities
all_aliases = list(getattr(canonical, "aliases", []) or [])
if incoming_name and incoming_name != canonical_name and incoming_name.lower() not in _USER_PLACEHOLDER_NAMES:
all_aliases.append(incoming_name)
all_aliases.extend(
a for a in (getattr(ent, "aliases", []) or [])
if a and a.strip().lower() not in _USER_PLACEHOLDER_NAMES
)
# Sort and assign
canonical.aliases = sorted(unique_aliases)
try:
from app.core.memory.utils.alias_utils import normalize_aliases
canonical.aliases = normalize_aliases(canonical_name, all_aliases)
except Exception:
seen_normalized = set()
unique_aliases = []
for alias in all_aliases:
if not alias:
continue
alias_stripped = str(alias).strip()
if not alias_stripped or alias_stripped == canonical_name:
continue
alias_normalized = alias_stripped.lower()
if alias_normalized not in seen_normalized:
seen_normalized.add(alias_normalized)
unique_aliases.append(alias_stripped)
canonical.aliases = sorted(unique_aliases)
except Exception:
pass
@@ -733,66 +720,37 @@ def fuzzy_match(
def _merge_entities_with_aliases(canonical: ExtractedEntityNode, losing: ExtractedEntityNode):
""" 模糊匹配中的实体合并。
"""模糊匹配中的实体合并(别名部分)
合并策略:
1. 保留canonical的主名称不变
2. 将losing的主名称添加为alias如果不同
3. 合并两个实体的所有aliases
4. 自动去重case-insensitive并排序
Args:
canonical: 规范实体(保留)
losing: 被合并实体(删除)
Note:
使用alias_utils.normalize_aliases进行标准化去重
用户实体的 aliases 由 PgSQL end_user_info 作为唯一权威源,跳过合并。
"""
canonical_name = (getattr(canonical, "name", "") or "").strip()
if canonical_name.lower() in _USER_PLACEHOLDER_NAMES:
return
losing_name = (getattr(losing, "name", "") or "").strip()
all_aliases = list(getattr(canonical, "aliases", []) or [])
if losing_name and losing_name != canonical_name:
all_aliases.append(losing_name)
all_aliases.extend(getattr(losing, "aliases", []) or [])
try:
from app.core.memory.utils.alias_utils import normalize_aliases
canonical.aliases = normalize_aliases(canonical_name, all_aliases)
except Exception:
seen_normalized = set()
unique_aliases = []
for alias in all_aliases:
if not alias:
continue
alias_stripped = str(alias).strip()
if not alias_stripped or alias_stripped == canonical_name:
continue
alias_normalized = alias_stripped.lower()
if alias_normalized not in seen_normalized:
seen_normalized.add(alias_normalized)
unique_aliases.append(alias_stripped)
canonical.aliases = sorted(unique_aliases)
# ========== Main loop: fuzzy-match every entity pair ==========
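# A standalone sketch of the fallback dedup used above (helper name is
# illustrative, not part of the codebase; stdlib only):
def _dedupe_aliases_sketch(canonical_name, aliases):
    seen, unique = set(), []
    for alias in aliases:
        stripped = str(alias or "").strip()
        if not stripped or stripped == canonical_name:
            continue
        if stripped.lower() not in seen:
            seen.add(stripped.lower())
            unique.append(stripped)
    return sorted(unique)
# _dedupe_aliases_sketch("张三", ["小张", "XiaoZhang", "xiaozhang", "张三"]) -> ["XiaoZhang", "小张"]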

View File

@@ -311,10 +311,53 @@ class ExtractionOrchestrator:
dialog_data_list,
)
# Step 7: trigger async metadata and alias extraction (non-pilot mode only)
if not is_pilot_run:
logger.info("步骤 7: 同步用户别名到 end_user 和 end_user_info 表")
await self._update_end_user_other_name(entity_nodes, dialog_data_list)
try:
from app.core.memory.storage_services.extraction_engine.knowledge_extraction.metadata_extractor import (
MetadataExtractor,
)
metadata_extractor = MetadataExtractor(
llm_client=self.llm_client, language=self.language
)
user_statements = (
metadata_extractor.collect_user_related_statements(
entity_nodes, statement_nodes, statement_entity_edges
)
)
if user_statements:
end_user_id = (
dialog_data_list[0].end_user_id
if dialog_data_list
else None
)
config_id = (
dialog_data_list[0].config_id
if dialog_data_list
and hasattr(dialog_data_list[0], "config_id")
else None
)
if end_user_id:
from app.tasks import extract_user_metadata_task
extract_user_metadata_task.delay(
end_user_id=str(end_user_id),
statements=user_statements,
config_id=str(config_id) if config_id else None,
language=self.language,
)
logger.info(
f"已触发异步元数据提取任务,共 {len(user_statements)} 条用户相关 statement"
)
else:
logger.info("未找到用户相关 statement跳过元数据提取")
except Exception as e:
logger.error(
f"触发元数据提取任务失败(不影响主流程): {e}", exc_info=True
)
# Alias syncing has moved into the Celery metadata-extraction task and no longer runs here
logger.info(f"知识提取流水线运行完成({mode_str}")
return (
@@ -1107,6 +1150,7 @@ class ExtractionOrchestrator:
end_user_id=dialog_data.end_user_id,
run_id=dialog_data.run_id,  # use the run_id from dialog_data
content=chunk.content,
speaker=getattr(chunk, 'speaker', None),
chunk_embedding=chunk.chunk_embedding,
sequence_number=chunk_idx,  # add the required sequence_number field
created_at=dialog_data.created_at,
@@ -1342,23 +1386,23 @@ class ExtractionOrchestrator:
async def _update_end_user_other_name(
self,
entity_nodes: List[ExtractedEntityNode],
dialog_data_list: List[DialogData],
) -> None:
"""
将本轮提取的用户别名同步到 end_user 和 end_user_info 表。
注意:此方法在 Neo4j 写入之前调用,因此不能依赖 Neo4j 作为别名的权威数据源。
改为直接使用内存中去重后的 entity_nodes 的 aliases与 PgSQL 已有的 aliases 合并。
PgSQL end_user_info.aliases 是用户别名的唯一权威源。
此方法仅将本轮 LLM 从对话中新提取的别名增量追加到 PgSQL
不再从 Neo4j 二层去重合并历史别名,避免脏数据反向污染 PgSQL。
策略:
1. 从内存中的 entity_nodes 提取本轮用户别名current_aliases
2. 从去重后的 entity_nodes 中提取完整别名(含 Neo4j 二层去重合并的历史别名
3. 从 PgSQL end_user_info 读取已有的 aliasesdb_aliases
4. 合并 db_aliases + deduped_aliases + current_aliases去重保序
5. 写回 PgSQL
1. 从本轮对话原始发言中提取用户别名current_aliases
2. 从 PgSQL end_user_info 读取已有的 aliasesdb_aliases
3. 合并 db_aliases + current_aliases,去重保序
4. 写回 PgSQL
Args:
entity_nodes: 去重后的实体节点列表(内存中,含二层去重合并结果
entity_nodes: 去重后的实体节点列表(内存中)
dialog_data_list: 对话数据列表
"""
try:
@@ -1374,11 +1418,6 @@ class ExtractionOrchestrator:
# 1. Extract this round's user aliases (keep the LLM's original extraction order; do not sort)
current_aliases = self._extract_current_aliases(entity_nodes, dialog_data_list)
# 1.6 Query existing AI-assistant aliases from Neo4j as an extra exclusion source
# (prevents AI aliases from leaking into user aliases when the LLM fails to extract an AI-assistant entity)
neo4j_assistant_aliases = await self._fetch_neo4j_assistant_aliases(end_user_id)
@@ -1390,19 +1429,12 @@ class ExtractionOrchestrator:
]
if len(current_aliases) < before_count:
logger.info(f"通过 Neo4j AI 助手别名排除了 {before_count - len(current_aliases)} 个误归属别名")
if not current_aliases:
logger.debug(f"本轮未提取到用户别名,跳过同步: end_user_id={end_user_id}")
return
logger.info(f"本轮对话提取的 aliases: {current_aliases}")
# 2. Sync to the database
end_user_uuid = uuid.UUID(end_user_id)
@@ -1413,21 +1445,15 @@ class ExtractionOrchestrator:
logger.warning(f"未找到 end_user_id={end_user_id} 的用户记录")
return
# 3. Read the existing aliases from PgSQL and merge with this round's additions
info = EndUserInfoRepository(db).get_by_end_user_id(end_user_uuid)
db_aliases = (info.aliases if info and info.aliases else [])
# Filter out placeholder names
db_aliases = [a for a in db_aliases if a.strip().lower() not in self.USER_PLACEHOLDER_NAMES]
# Merge: existing PgSQL aliases + this round's additions, deduping while preserving order (Neo4j historical aliases are no longer merged)
merged_aliases = list(db_aliases)
seen_lower = {a.strip().lower() for a in merged_aliases}
# Merge this round's newly extracted aliases
for alias in current_aliases:
if alias.strip().lower() not in seen_lower:
merged_aliases.append(alias)
@@ -1461,16 +1487,13 @@ class ExtractionOrchestrator:
info.aliases = merged_aliases
logger.info(f"同步合并后 aliases 到 end_user_info: {merged_aliases}")
else:
first_alias = current_aliases[0].strip() if current_aliases else ""
# Ensure first_alias is not a placeholder name
if first_alias and first_alias.lower() not in self.USER_PLACEHOLDER_NAMES:
db.add(EndUserInfo(
end_user_id=end_user_uuid,
other_name=first_alias,
aliases=merged_aliases,
meta_data={}
))
logger.info(f"创建 end_user_info 记录other_name={first_alias}, aliases={merged_aliases}")
@@ -1478,9 +1501,6 @@ class ExtractionOrchestrator:
except Exception as e:
logger.error(f"更新 end_user other_name 失败: {e}", exc_info=True)
# User-entity placeholder names; never allowed as other_name or in aliases
# Reuse the module-level constant from deduped_and_disamb to avoid maintaining two lists
USER_PLACEHOLDER_NAMES = _USER_PLACEHOLDER_NAMES
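# A standalone sketch of the order-preserving merge performed above
# (function name and placement are illustrative, not part of the codebase):
def _merge_alias_lists_sketch(db_aliases, current_aliases):
    merged = list(db_aliases)
    seen_lower = {a.strip().lower() for a in merged}
    for alias in current_aliases:
        key = alias.strip().lower()
        if key not in seen_lower:
            merged.append(alias)
            seen_lower.add(key)
    return merged  # e.g. (["陈哥"], ["老陈", "陈哥"]) -> ["陈哥", "老陈"]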
@@ -1587,7 +1607,6 @@ class ExtractionOrchestrator:
if candidate and candidate.lower() in self.USER_PLACEHOLDER_NAMES:
return None
return candidate
async def _run_dedup_and_write_summary(

View File

@@ -0,0 +1,175 @@
"""
Metadata extractor module.
Collects user-related statements from post-dedup graph data and
extracts user metadata via an independent LLM call.
"""
import logging
from typing import List, Optional
from app.core.memory.models.graph_models import (
ExtractedEntityNode,
StatementEntityEdge,
StatementNode,
)
logger = logging.getLogger(__name__)
# Reuse the same user-entity detection logic from dedup module
_USER_NAMES = {"用户", "我", "user", "i"}
_CANONICAL_USER_TYPE = "用户"
def _is_user_entity(ent: ExtractedEntityNode) -> bool:
"""判断实体是否为用户实体"""
name = (getattr(ent, "name", "") or "").strip().lower()
etype = (getattr(ent, "entity_type", "") or "").strip()
return name in _USER_NAMES or etype == _CANONICAL_USER_TYPE
class MetadataExtractor:
"""Extracts user metadata from post-dedup graph data via independent LLM call."""
def __init__(self, llm_client, language: Optional[str] = None):
self.llm_client = llm_client
self.language = language
@staticmethod
def detect_language(statements: List[str]) -> str:
"""根据 statement 文本内容检测语言。
如果文本中包含中文字符则返回 "zh",否则返回 "en"
"""
import re
combined = " ".join(statements)
if re.search(r"[\u4e00-\u9fff]", combined):
return "zh"
return "en"
def collect_user_related_statements(
self,
entity_nodes: List[ExtractedEntityNode],
statement_nodes: List[StatementNode],
statement_entity_edges: List[StatementEntityEdge],
) -> List[str]:
"""
From deduplicated data, select statement texts that are directly related to the user and spoken by the user.
Selection logic:
1. user entity → StatementEntityEdge → statement (direct association)
2. keep only statements with speaker="user" (filters out noise from assistant replies)
Returns:
    List of statement texts spoken by the user
"""
# Find user entity IDs
user_entity_ids = set()
for ent in entity_nodes:
if _is_user_entity(ent):
user_entity_ids.add(ent.id)
if not user_entity_ids:
logger.debug("未找到用户实体节点,跳过 statement 收集")
return []
# user entity → StatementEntityEdge → statement
target_stmt_ids = set()
for edge in statement_entity_edges:
if edge.target in user_entity_ids:
target_stmt_ids.add(edge.source)
# Collect: only speaker="user" statements, preserving order
result = []
seen = set()
total_associated = 0
skipped_non_user = 0
for stmt_node in statement_nodes:
if stmt_node.id in target_stmt_ids and stmt_node.id not in seen:
total_associated += 1
speaker = getattr(stmt_node, "speaker", None) or "unknown"
if speaker == "user":
text = (stmt_node.statement or "").strip()
if text:
result.append(text)
else:
skipped_non_user += 1
seen.add(stmt_node.id)
logger.info(
f"收集到 {len(result)} 条用户发言 statement "
f"(直接关联: {total_associated}, speaker=user: {len(result)}, "
f"跳过非user: {skipped_non_user})"
)
if result:
for i, text in enumerate(result):
logger.info(f" [user statement {i + 1}] {text}")
if total_associated > 0 and len(result) == 0:
logger.warning(
f"{total_associated} 条直接关联 statement 但全部被 speaker 过滤,"
f"可能本次写入不包含 user 消息"
)
return result
async def extract_metadata(
self,
statements: List[str],
existing_metadata: Optional[dict] = None,
existing_aliases: Optional[List[str]] = None,
) -> Optional[tuple]:
"""
Run one LLM call over the filtered statement list to extract metadata and user aliases.
Args:
    statements: list of statement texts spoken by the user
    existing_metadata: existing metadata from the database (optional)
    existing_aliases: existing user alias list from the database (optional)
Returns:
(UserMetadata, List[str], List[str]) tuple: (metadata, aliases_to_add, aliases_to_remove) on success, None on failure
"""
if not statements:
return None
try:
from app.core.memory.utils.prompt.prompt_utils import prompt_env
if self.language:
detected_language = self.language
logger.info(f"元数据提取使用显式指定语言: {detected_language}")
else:
detected_language = self.detect_language(statements)
logger.info(f"元数据提取语言自动检测结果: {detected_language}")
template = prompt_env.get_template("extract_user_metadata.jinja2")
prompt = template.render(
statements=statements,
language=detected_language,
existing_metadata=existing_metadata,
existing_aliases=existing_aliases,
json_schema="",
)
from app.core.memory.models.metadata_models import (
MetadataExtractionResponse,
)
response = await self.llm_client.response_structured(
messages=[{"role": "user", "content": prompt}],
response_model=MetadataExtractionResponse,
)
if response:
metadata = response.user_metadata if response.user_metadata else None
to_add = response.aliases_to_add if response.aliases_to_add else []
to_remove = (
response.aliases_to_remove if response.aliases_to_remove else []
)
return metadata, to_add, to_remove
logger.warning("LLM 返回的响应为空")
return None
except Exception as e:
logger.error(f"元数据提取 LLM 调用失败: {e}", exc_info=True)
return None
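# Hypothetical wiring sketch (llm_client and the node lists are assumed to come
# from the extraction pipeline; mirrors how the orchestrator's step 7 uses this class):
async def _metadata_extraction_demo(llm_client, entity_nodes, statement_nodes, statement_entity_edges):
    extractor = MetadataExtractor(llm_client=llm_client, language=None)
    stmts = extractor.collect_user_related_statements(
        entity_nodes, statement_nodes, statement_entity_edges
    )
    if not stmts:
        return None
    result = await extractor.extract_metadata(stmts)
    if result:
        metadata, aliases_to_add, aliases_to_remove = result
        return metadata, aliases_to_add, aliases_to_remove
    return None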

View File

@@ -1,6 +1,5 @@
import asyncio
import logging
from datetime import datetime
from typing import Any, Dict, List, Optional
@@ -82,6 +81,7 @@ class StatementExtractor:
logger.warning(f"Chunk {getattr(chunk, 'id', 'unknown')} has no speaker field or is empty")
return None
async def _extract_statements(self, chunk, end_user_id: Optional[str] = None, dialogue_content: Optional[str] = None) -> List[Statement]:
"""Process a single chunk and return extracted statements
@@ -94,7 +94,8 @@ class StatementExtractor:
List of ExtractedStatement objects extracted from the chunk
"""
chunk_content = chunk.content
chunk_speaker = self._get_speaker_from_chunk(chunk)
if not chunk_content or len(chunk_content.strip()) < 5:
logger.warning(f"Chunk {chunk.id} content too short or empty, skipping")
return []
@@ -149,8 +150,6 @@ class StatementExtractor:
relevence_info = RelevenceInfo[relevence_str] if relevence_str in RelevenceInfo.__members__ else RelevenceInfo.RELEVANT
except (KeyError, ValueError):
relevence_info = RelevenceInfo.RELEVANT
chunk_statement = Statement(
statement=extracted_stmt.statement,

View File

@@ -1,4 +1,3 @@
import os
import asyncio
from typing import List, Dict, Optional

View File

@@ -42,22 +42,21 @@ class AccessHistoryManager:
- access_count: number of accesses
Features:
- Atomic updates: APOC atomic operations guarantee concurrency safety
- In-batch merging: multiple accesses to the same node within a batch are merged into one update
- Consistency guarantees: built-in consistency checks and automatic repair
- Smart trimming: overly long access histories are trimmed automatically
Attributes:
    connector: Neo4j connector instance
    actr_calculator: ACT-R activation calculator instance
    max_retries: maximum retry count on concurrency conflicts (now deprecated)
"""
def __init__(
self,
connector: Neo4jConnector,
actr_calculator: ACTRCalculator,
max_retries: int = 5
):
"""
Initialize the access history manager
@@ -65,47 +64,35 @@ class AccessHistoryManager:
Args:
    connector: Neo4j connector instance
    actr_calculator: ACT-R activation calculator instance
    max_retries: deprecated; kept for signature compatibility (APOC atomic ops need no retries)
"""
self.connector = connector
self.actr_calculator = actr_calculator
self.max_retries = max_retries
async def record_access(
self,
node_id: str,
node_label: str,
end_user_id: Optional[str] = None,
current_time: Optional[datetime] = None,
access_times: int = 1
) -> Dict[str, Any]:
"""
记录节点访问并原子性更新所有相关字段
这是核心方法,实现了:
1. 首次访问初始化access_history计算初始激活值
2. 后续访问:追加访问历史,重新计算激活值
3. 历史修剪:当历史过长时自动修剪
4. 原子性:所有字段在单个事务中更新
5. 并发安全:使用乐观锁重试机制
Args:
node_id: 节点ID
node_label: 节点标签Statement, ExtractedEntity, MemorySummary
end_user_id: 组ID可选用于过滤
current_time: 当前时间(可选,默认使用系统时间)
access_times: 本次访问次数默认1批量合并时可能大于1
Returns:
Dict[str, Any]: 更新后的节点数据,包含:
- id: 节点ID
- activation_value: 更新后的激活值
- access_history: 更新后的访问历史
- last_access_time: 最后访问时间
- access_count: 访问次数
- importance_score: 重要性分数
Dict[str, Any]: 更新后的节点数据
Raises:
ValueError: 如果节点不存在或节点标签无效
RuntimeError: 如果重试次数耗尽仍然失败
RuntimeError: 如果更新失败
"""
if current_time is None:
current_time = datetime.now()
@@ -119,55 +106,48 @@ class AccessHistoryManager:
f"Invalid node_label: {node_label}. Must be one of {valid_labels}"
)
try:
# Step 1: read the current node state
node_data = await self._fetch_node(node_id, node_label, end_user_id)
if not node_data:
raise ValueError(
f"Node not found: {node_label} with id={node_id}"
)
# Step 2: compute the new access history and activation value
update_data = await self._calculate_update(
node_data=node_data,
current_time=current_time,
current_time_iso=current_time_iso,
access_times=access_times
)
# Step 3: update the node with APOC atomic operations (no retry needed)
updated_node = await self._atomic_update(
node_id=node_id,
node_label=node_label,
update_data=update_data,
end_user_id=end_user_id
)
logger.info(
f"成功记录访问: {node_label}[{node_id}], "
f"activation={update_data['activation_value']:.4f}, "
f"access_count={update_data['access_count']}"
f"{f', 合并访问次数={access_times}' if access_times > 1 else ''}"
)
return updated_node
except Exception as e:
logger.error(
f"访问记录失败: {node_label}[{node_id}], 错误: {str(e)}"
)
raise RuntimeError(
f"Failed to record access: {str(e)}"
) from e
async def record_batch_access(
self,
node_ids: List[str],
@@ -178,11 +158,10 @@ class AccessHistoryManager:
"""
批量记录多个节点的访问
为提高性能,批量更新多个节点的访问历史
每个节点独立更新,失败的节点不影响其他节点。
对同一个节点的多次访问会先在内存中合并,只发起一次更新
Args:
node_ids: 节点ID列表
node_ids: 节点ID列表可包含重复ID
node_label: 节点标签(所有节点必须是同一类型)
end_user_id: 组ID可选
current_time: 当前时间(可选)
@@ -196,25 +175,38 @@ class AccessHistoryManager:
if current_time is None:
current_time = datetime.now()
# Merge access counts per node to avoid concurrent writes to the same node
access_count_map: Dict[str, int] = {}
for node_id in node_ids:
access_count_map[node_id] = access_count_map.get(node_id, 0) + 1
merged_count = len(node_ids) - len(access_count_map)
if merged_count > 0:
logger.info(
f"批量访问合并: 原始={len(node_ids)}, "
f"去重后={len(access_count_map)}, 合并={merged_count}"
)
# Issue parallel updates for the deduplicated nodes
tasks = []
for node_id, access_times in access_count_map.items():
task = self.record_access(
node_id=node_id,
node_label=node_label,
end_user_id=end_user_id,
current_time=current_time,
access_times=access_times
)
tasks.append((node_id, task))
# Execute all tasks in parallel
task_results = await asyncio.gather(
    *[t for _, t in tasks], return_exceptions=True
)
# Collect successful results and count failures
results = []
failed_count = 0
for (node_id, _), result in zip(tasks, task_results):
if isinstance(result, Exception):
failed_count += 1
logger.warning(
@@ -225,12 +217,12 @@ class AccessHistoryManager:
batch_duration = time.time() - batch_start
logger.info(
f"[PERF] 批量访问记录完成: 成功 {len(results)}/{len(node_ids)}, "
f"[PERF] 批量访问记录完成: 成功 {len(results)}/{len(access_count_map)}, "
f"失败 {failed_count}, 耗时 {batch_duration:.4f}s"
)
return results
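# Equivalent sketch of the in-memory merge above, using collections.Counter
# (inputs illustrative):
#     from collections import Counter
#     access_count_map = dict(Counter(["n1", "n2", "n1"]))  # {'n1': 2, 'n2': 1}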
async def check_consistency(
self,
node_id: str,
@@ -239,22 +231,6 @@ class AccessHistoryManager:
) -> Tuple[ConsistencyCheckResult, Optional[str]]:
"""
检查节点数据的一致性
验证以下一致性规则:
1. access_history[-1] == last_access_time
2. len(access_history) == access_count
3. 如果有访问历史,必须有激活值
4. 激活值必须在有效范围内 [offset, 1.0]
Args:
node_id: 节点ID
node_label: 节点标签
end_user_id: 组ID可选
Returns:
Tuple[ConsistencyCheckResult, Optional[str]]:
- 一致性检查结果枚举
- 错误描述(如果不一致)
"""
node_data = await self._fetch_node(node_id, node_label, end_user_id)
@@ -266,7 +242,6 @@ class AccessHistoryManager:
access_count = node_data.get('access_count', 0)
activation_value = node_data.get('activation_value')
if access_history and last_access_time:
if access_history[-1] != last_access_time:
return (
@@ -275,7 +250,6 @@ class AccessHistoryManager:
f"last_access_time={last_access_time}"
)
if len(access_history) != access_count:
return (
ConsistencyCheckResult.INCONSISTENT_HISTORY_COUNT,
@@ -283,14 +257,12 @@ class AccessHistoryManager:
f"access_count={access_count}"
)
if access_history and activation_value is None:
return (
ConsistencyCheckResult.MISSING_ACTIVATION,
"Node has access_history but activation_value is None"
)
if activation_value is not None:
offset = self.actr_calculator.offset
if not (offset <= activation_value <= 1.0):
@@ -301,30 +273,14 @@ class AccessHistoryManager:
)
return ConsistencyCheckResult.CONSISTENT, None
async def check_batch_consistency(
self,
node_label: str,
end_user_id: Optional[str] = None,
limit: int = 1000
) -> Dict[str, Any]:
"""
批量检查多个节点的一致性
Args:
node_label: 节点标签
end_user_id: 组ID可选
limit: 检查的最大节点数
Returns:
Dict[str, Any]: 一致性检查报告,包含:
- total_checked: 检查的节点总数
- consistent_count: 一致的节点数
- inconsistent_count: 不一致的节点数
- inconsistencies: 不一致节点的详细信息列表
- consistency_rate: 一致性率0-1
"""
# 查询所有相关节点
"""批量检查多个节点的一致性"""
query = f"""
MATCH (n:{node_label})
WHERE n.access_history IS NOT NULL
@@ -343,7 +299,6 @@ class AccessHistoryManager:
results = await self.connector.execute_query(query, **params)
node_ids = [r['id'] for r in results]
inconsistencies = []
consistent_count = 0
@@ -382,32 +337,15 @@ class AccessHistoryManager:
)
return report
async def repair_inconsistency(
self,
node_id: str,
node_label: str,
end_user_id: Optional[str] = None
) -> bool:
"""
自动修复节点的数据不一致问题
修复策略:
1. 如果access_history[-1] != last_access_time使用access_history[-1]
2. 如果len(access_history) != access_count使用len(access_history)
3. 如果有历史但无激活值:重新计算激活值
4. 如果激活值超出范围:重新计算激活值
Args:
node_id: 节点ID
node_label: 节点标签
end_user_id: 组ID可选
Returns:
bool: 修复成功返回True否则返回False
"""
"""自动修复节点的数据不一致问题"""
try:
result, message = await self.check_consistency(
node_id=node_id,
node_label=node_label,
@@ -418,7 +356,6 @@ class AccessHistoryManager:
logger.info(f"节点数据一致,无需修复: {node_label}[{node_id}]")
return True
node_data = await self._fetch_node(node_id, node_label, end_user_id)
if not node_data:
logger.error(f"节点不存在,无法修复: {node_label}[{node_id}]")
@@ -427,17 +364,13 @@ class AccessHistoryManager:
access_history = node_data.get('access_history') or []
importance_score = node_data.get('importance_score', 0.5)
repair_data = {}
if access_history:
    repair_data['last_access_time'] = access_history[-1]
repair_data['access_count'] = len(access_history)
if access_history:
current_time = datetime.now()
last_access_dt = datetime.fromisoformat(access_history[-1])
@@ -453,7 +386,6 @@ class AccessHistoryManager:
)
repair_data['activation_value'] = activation_value
query = f"""
MATCH (n:{node_label} {{id: $node_id}})
"""
@@ -484,26 +416,16 @@ class AccessHistoryManager:
f"修复节点失败: {node_label}[{node_id}], 错误: {str(e)}"
)
return False
async def _fetch_node(
self,
node_id: str,
node_label: str,
end_user_id: Optional[str] = None
) -> Optional[Dict[str, Any]]:
"""
获取节点数据
Args:
node_id: 节点ID
node_label: 节点标签
end_user_id: 组ID可选
Returns:
Optional[Dict[str, Any]]: 节点数据如果不存在返回None
"""
"""获取节点数据"""
query = f"""
MATCH (n:{node_label} {{id: $node_id}})
"""
@@ -527,12 +449,13 @@ class AccessHistoryManager:
if results:
return results[0]
return None
async def _calculate_update(
self,
node_data: Dict[str, Any],
current_time: datetime,
current_time_iso: str,
access_times: int = 1
) -> Dict[str, Any]:
"""
Compute the update payload
@@ -541,45 +464,40 @@ class AccessHistoryManager:
    node_data: current node data
    current_time: current time (datetime object)
    current_time_iso: current time as an ISO-format string
    access_times: number of accesses in this call (may exceed 1 after merging)
Returns:
    Dict[str, Any]: the update payload
"""
access_history = node_data.get('access_history') or []
# Handle None importance_score - default to 0.5
importance_score = node_data.get('importance_score')
if importance_score is None:
importance_score = 0.5
# Timestamps added by this call
new_timestamps = [current_time_iso] * access_times
# Compute the activation value from this call's new accesses only
new_history_dt = [current_time] * access_times
trimmed_history_dt = self.actr_calculator.trim_access_history(
    access_history=new_history_dt,
    current_time=current_time
)
# Compute the new activation value
activation_value = self.actr_calculator.calculate_memory_activation(
    access_history=trimmed_history_dt,
    current_time=current_time,
    last_access_time=current_time,
    importance_score=importance_score
)
return {
    'activation_value': activation_value,
    'new_timestamps': new_timestamps,
    'access_count_delta': access_times,
    'access_count': len(trimmed_history_dt),
    'last_access_time': current_time_iso,
}
async def _atomic_update(
self,
node_id: str,
@@ -588,10 +506,10 @@ class AccessHistoryManager:
end_user_id: Optional[str] = None
) -> Dict[str, Any]:
"""
原子性更新节点(使用乐观锁
原子性更新节点(使用 APOC 原子操作
使用Neo4j事务和版本号确保所有字段同时更新或回滚。
实现乐观锁机制防止并发冲突
使用 apoc.atomic.add 和 apoc.atomic.insert 保证并发安全,
无需 version 字段和乐观锁,数据库层面保证原子性
Args:
node_id: 节点ID
@@ -603,126 +521,68 @@ class AccessHistoryManager:
    Dict[str, Any]: the updated node data
Raises:
    RuntimeError: if the update fails
"""
content_field_map = {
    'Statement': 'n.statement as statement',
    'MemorySummary': 'n.content as content',
    'ExtractedEntity': 'null as content_placeholder',
    'Community': 'n.summary as summary'
}
if node_label not in content_field_map:
    raise ValueError(
        f"Unsupported node_label: {node_label}. "
        f"Supported labels are: {list(content_field_map.keys())}"
    )
content_field = content_field_map[node_label]
where_clause = ""
if end_user_id:
    where_clause = " AND n.end_user_id = $end_user_id"
query = f"""
MATCH (n:{node_label} {{id: $node_id}})
WHERE true{where_clause}
CALL apoc.atomic.add(n, 'access_count', $access_count_delta, 5) YIELD oldValue AS old_count
WITH n
CALL (n) {{
    UNWIND $new_timestamps AS ts
    CALL apoc.atomic.insert(n, 'access_history', size(n.access_history), ts, 5) YIELD oldValue
    RETURN count(*) AS inserted
}}
SET n.activation_value = $activation_value,
    n.last_access_time = $last_access_time
RETURN n.id as id,
       n.activation_value as activation_value,
       n.access_history as access_history,
       n.last_access_time as last_access_time,
       n.access_count as access_count,
       n.importance_score as importance_score,
       {content_field}
"""
params = {
    'node_id': node_id,
    'access_count_delta': update_data['access_count_delta'],
    'new_timestamps': update_data['new_timestamps'],
    'activation_value': update_data['activation_value'],
    'last_access_time': update_data['last_access_time'],
}
if end_user_id:
    params['end_user_id'] = end_user_id
try:
    results = await self.connector.execute_query(query, **params)
    if not results:
        raise RuntimeError(f"Node not found: {node_label}[{node_id}]")
    # Convert to a dict and drop the placeholder field
    result_dict = dict(results[0])
    result_dict.pop('content_placeholder', None)
    return result_dict
except Exception as e:
    logger.error(
        f"原子性更新失败: {node_label}[{node_id}], 错误: {str(e)}"

View File

@@ -5,7 +5,7 @@
使用Neo4j的全文索引进行高效的文本匹配。
"""
from typing import List, Optional
from app.core.logging_config import get_memory_logger
from app.repositories.neo4j.neo4j_connector import Neo4jConnector
from app.core.memory.storage_services.search.search_strategy import SearchStrategy, SearchResult
@@ -74,7 +74,7 @@ class KeywordSearchStrategy(SearchStrategy):
# Call the underlying keyword search function
results_dict = await search_graph(
connector=self.connector,
query=query_text,
end_user_id=end_user_id,
limit=limit,
include=include_list

View File

@@ -22,7 +22,9 @@ def escape_lucene_query(query: str) -> str:
s = s.replace("\r", " ").replace("\n", " ").strip()
# Lucene reserved tokens/special characters
specials = ['&&', '||', '\\', '+', '-', '!', '(', ')', '{', '}', '[', ']', '^', '"', '~', '*', '?', ':']
# NOTE: '/' is the regex delimiter in Lucene — must be escaped to prevent
# TokenMgrError when the query contains unmatched slashes.
specials = ['&&', '||', '\\', '+', '-', '!', '(', ')', '{', '}', '[', ']', '^', '"', '~', '*', '?', ':', '/']
# Replace longer tokens first to avoid partial double-escaping
for token in sorted(specials, key=len, reverse=True):
s = s.replace(token, f"\\{token}")
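# Quick behavioral check of the escaping above (illustrative):
#     escape_lucene_query("a/b (c)")  ->  'a\\/b \\(c\\)'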

View File

@@ -43,8 +43,9 @@ Each statement must be labeled as per the criteria mentioned below.
对话上下文和共指消解:
- 将每个陈述句归属于说出它的参与者。
- **对于用户的发言:必须使用"用户"作为主语**,禁止将"用户"或"我"替换为用户的真实姓名或别名。例如,用户说"我叫张三"应提取为"用户叫张三",而不是"张三叫张三"。
- 对于 AI 助手的发言:使用"助手"或"AI助手"作为主语。
- 将所有代词解析为对话上下文中的具体人物或实体,但"我"必须解析为"用户"。
- 识别并将抽象引用解析为其具体名称(如果提到)。
- 将缩写和首字母缩略词扩展为其完整形式。
{% else %}
@@ -68,8 +69,9 @@ Context Resolution Requirements:
Conversational Context & Co-reference Resolution:
- Attribute every statement to the participant who uttered it.
- **For user's statements: always use "用户" (User) as the subject**. Do NOT replace "用户" or "I" with the user's real name or alias. For example, if the user says "I'm John", extract as "用户 is John", not "John is John".
- For AI assistant's statements: use "助手" or "AI助手" as the subject.
- Resolve all pronouns to the specific person or entity from the conversation's context, but "I"/"我" must always resolve to "用户".
- Identify and resolve abstract references to their specific names if mentioned.
- Expand abbreviations and acronyms to their full form.
{% endif %}
@@ -139,13 +141,13 @@ AI: "水彩画很有趣!水彩颜料通常由颜料与阿拉伯树胶等粘合
示例输出: {
"statements": [
{
"statement": "Sarah Chen 最近一直在尝试水彩画。",
"statement": "用户最近一直在尝试水彩画。",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
},
{
"statement": "Sarah Chen 画了一些花朵。",
"statement": "用户画了一些花朵。",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
@@ -157,13 +159,13 @@ AI: "水彩画很有趣!水彩颜料通常由颜料与阿拉伯树胶等粘合
"relevance": "IRRELEVANT"
},
{
"statement": "Sarah Chen 认为她的水彩画中的色彩组合可以改进。",
"statement": "用户认为她的水彩画中的色彩组合可以改进。",
"statement_type": "OPINION",
"temporal_type": "STATIC",
"relevance": "RELEVANT"
},
{
"statement": "Sarah Chen 真的很喜欢玫瑰和百合。",
"statement": "用户真的很喜欢玫瑰和百合。",
"statement_type": "FACT",
"temporal_type": "STATIC",
"relevance": "RELEVANT"
@@ -186,13 +188,13 @@ AI: "水彩画很有趣!水彩颜料通常由颜料和阿拉伯树胶等粘合
示例输出: {
"statements": [
{
"statement": "张曼婷最近在尝试水彩画。",
"statement": "用户最近在尝试水彩画。",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
},
{
"statement": "张曼婷画了一些花朵。",
"statement": "用户画了一些花朵。",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
@@ -204,13 +206,13 @@ AI: "水彩画很有趣!水彩颜料通常由颜料和阿拉伯树胶等粘合
"relevance": "IRRELEVANT"
},
{
"statement": "张曼婷觉得水彩画的色彩搭配还有提升的空间。",
"statement": "用户觉得水彩画的色彩搭配还有提升的空间。",
"statement_type": "OPINION",
"temporal_type": "STATIC",
"relevance": "RELEVANT"
},
{
"statement": "张曼婷很喜欢玫瑰和百合。",
"statement": "用户很喜欢玫瑰和百合。",
"statement_type": "FACT",
"temporal_type": "STATIC",
"relevance": "RELEVANT"
@@ -233,13 +235,13 @@ User: "I think the color combinations could use some improvement, but I really l
Example Output: {
"statements": [
{
"statement": "Sarah Chen has been trying watercolor painting recently.",
"statement": "用户 has been trying watercolor painting recently.",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
},
{
"statement": "Sarah Chen painted some flowers.",
"statement": "用户 painted some flowers.",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
@@ -251,13 +253,13 @@ Example Output: {
"relevance": "IRRELEVANT"
},
{
"statement": "Sarah Chen thinks the color combinations in her watercolor paintings could use some improvement.",
"statement": "用户 thinks the color combinations in her watercolor paintings could use some improvement.",
"statement_type": "OPINION",
"temporal_type": "STATIC",
"relevance": "RELEVANT"
},
{
"statement": "Sarah Chen really likes roses and lilies.",
"statement": "用户 really likes roses and lilies.",
"statement_type": "FACT",
"temporal_type": "STATIC",
"relevance": "RELEVANT"
@@ -280,13 +282,13 @@ AI: "水彩画很有趣!水彩颜料通常由颜料和阿拉伯树胶等粘合
Example Output: {
"statements": [
{
"statement": "张曼婷最近在尝试水彩画。",
"statement": "用户最近在尝试水彩画。",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
},
{
"statement": "张曼婷画了一些花朵。",
"statement": "用户画了一些花朵。",
"statement_type": "FACT",
"temporal_type": "DYNAMIC",
"relevance": "RELEVANT"
@@ -298,13 +300,13 @@ Example Output: {
"relevance": "IRRELEVANT"
},
{
"statement": "张曼婷觉得水彩画的色彩搭配还有提升的空间。",
"statement": "用户觉得水彩画的色彩搭配还有提升的空间。",
"statement_type": "OPINION",
"temporal_type": "STATIC",
"relevance": "RELEVANT"
},
{
"statement": "张曼婷很喜欢玫瑰和百合。",
"statement": "用户很喜欢玫瑰和百合。",
"statement_type": "FACT",
"temporal_type": "STATIC",
"relevance": "RELEVANT"

View File

@@ -406,4 +406,12 @@ Output:
- **⚠️ ALIASES ORDER: preserve temporal order of appearance**
- **🚨 MANDATORY FIELD: EVERY entity MUST include "aliases" field, even if empty array []**
**Output JSON structure:**
```json
{
"triplets": [...],
"entities": [...]
}
```
{{ json_schema }}

View File

@@ -0,0 +1,135 @@
===Task===
Extract user metadata from the following conversation statements spoken by the user.
{% if language == "zh" %}
**"三度原则"判断标准:**
- 复用度:该信息是否会被多个功能模块使用?
- 约束度:该信息是否会影响系统行为?
- 时效性:该信息是长期稳定的还是临时的?仅提取长期稳定信息。
**提取规则:**
- **只提取关于"用户本人"的画像信息**,忽略用户提到的第三方人物(如朋友、同事、家人)的信息
- 仅提取文本中明确提到的信息,不要推测
- 如果文本中没有可提取的用户画像信息,返回空的 user_metadata 对象
- **输出语言必须与输入文本的语言一致**(输入中文则输出中文值,输入英文则输出英文值)
{% if existing_metadata %}
**重要:合并已有元数据**
下方提供了数据库中已有的用户元数据。请结合用户最新发言,输出**合并后的完整元数据**
- 如果用户明确否定了已有信息(如"我不再教高中物理了"),在输出中**移除**该信息
- 如果用户提到了新信息,**添加**到对应字段中
- 如果已有信息未被用户否定,**保留**在输出中
- 标量字段(如 role、domain如果用户提到了新值用新值替换否则保留已有值
- 最终输出应该是完整的、合并后的元数据,不是增量
{% endif %}
**字段说明:**
- profile.role用户的职业或角色如 教师、医生、后端工程师
- profile.domain用户所在领域如 教育、医疗、软件开发
- profile.expertise用户擅长的技能或工具通用不限于编程如 Python、心理咨询、高中物理
- profile.interests用户主动表达兴趣的话题或领域标签
- behavioral_hints.learning_stage学习阶段初学者/中级/高级)
- behavioral_hints.preferred_depth偏好深度概览/技术细节/深入探讨)
- behavioral_hints.tone_preference语气偏好轻松随意/专业简洁/学术严谨)
- knowledge_tags用户涉及的知识领域标签
**用户别名变更(增量模式):**
- **aliases_to_add**:本次新发现的用户别名,包括:
* 用户主动自我介绍:如"我叫张三"、"我的名字是XX"、"我的网名是XX"
* 他人对用户的称呼:如"同事叫我陈哥"、"大家叫我小张"、"领导叫我老陈"
* 只提取原文中逐字出现的名字,严禁推测或创造
* 禁止提取:用户给 AI 取的名字、第三方人物自身的名字、"用户"/"我" 等占位词
* 如果没有新别名,返回空数组 `[]`
- **aliases_to_remove**:用户明确否认的别名,包括:
* 用户说"我不叫XX了"、"别叫我XX"、"我改名了不叫XX" → 将 XX 放入此数组
* **严格限制**:只将用户原文中**逐字提到**的被否认名字放入,不要推断关联的其他别名
* 例如:用户说"我不叫陈小刀了" → 只移除"陈小刀",不要移除"陈哥"、"老陈"等未被提及的别名
* 如果没有要移除的别名,返回空数组 `[]`
{% if existing_aliases %}
- 已有别名:{{ existing_aliases | tojson }}(仅供参考,不需要在输出中重复)
{% endif %}
{% else %}
**"Three-Degree Principle" criteria:**
- Reusability: Will this information be used by multiple functional modules?
- Constraint: Will this information affect system behavior?
- Timeliness: Is this information long-term stable or temporary? Only extract long-term stable information.
**Extraction rules:**
- **Only extract profile information about the user themselves**, ignore information about third parties (friends, colleagues, family) mentioned by the user
- Only extract information explicitly mentioned in the text, do not speculate
- If no user profile information can be extracted, return an empty user_metadata object
- **Output language must match the input text language**
{% if existing_metadata %}
**Important: Merge with existing metadata**
Existing user metadata from the database is provided below. Combine with the user's latest statements to output the **complete merged metadata**:
- If the user explicitly negates existing info (e.g. "I no longer teach high school physics"), **remove** it from output
- If the user mentions new info, **add** it to the corresponding field
- If existing info is not negated by the user, **keep** it in the output
- Scalar fields (e.g. role, domain): replace with new value if user mentions one; otherwise keep existing
- The final output should be the complete, merged metadata — not an incremental update
{% endif %}
**Field descriptions:**
- profile.role: User's occupation or role, e.g. teacher, doctor, software engineer
- profile.domain: User's domain, e.g. education, healthcare, software development
- profile.expertise: User's skills or tools (general, not limited to programming)
- profile.interests: Topics or domain tags the user actively expressed interest in
- behavioral_hints.learning_stage: Learning stage (beginner/intermediate/advanced)
- behavioral_hints.preferred_depth: Preferred depth (overview/detailed/deep dive)
- behavioral_hints.tone_preference: Tone preference (casual/professional/academic)
- knowledge_tags: Knowledge domain tags related to the user
**User alias changes (incremental mode):**
- **aliases_to_add**: Newly discovered user aliases from this conversation, including:
* User self-introductions: e.g. "I'm John", "My name is XX", "My username is XX"
* How others address the user: e.g. "My colleagues call me Johnny", "People call me Mike"
* Only extract names that appear VERBATIM in the text — never infer or fabricate
* Do NOT extract: names the user gives to the AI, third-party people's own names, placeholder words like "User"/"I"
* If no new aliases, return empty array `[]`
- **aliases_to_remove**: Aliases the user explicitly denies, including:
* User says "Don't call me XX anymore", "I'm not called XX", "I changed my name from XX" → put XX in this array
* **Strict rule**: Only include the exact name the user **verbatim mentions** as denied. Do NOT infer or remove related aliases
* Example: User says "I'm not called John anymore" → only remove "John", do NOT remove "Johnny", "J" or other related aliases not mentioned
* If no aliases to remove, return empty array `[]`
{% if existing_aliases %}
- Existing aliases: {{ existing_aliases | tojson }} (for reference only, do not repeat in output)
{% endif %}
{% endif %}
===User Statements===
{% for stmt in statements %}
- {{ stmt }}
{% endfor %}
{% if existing_metadata %}
===Existing User Metadata===
```json
{{ existing_metadata | tojson }}
```
{% endif %}
===Output Format===
Return a JSON object with the following structure:
```json
{
"user_metadata": {
"profile": {
"role": "",
"domain": "",
"expertise": [],
"interests": []
},
"behavioral_hints": {
"learning_stage": "",
"preferred_depth": "",
"tone_preference": ""
},
"knowledge_tags": []
},
"aliases_to_add": [],
"aliases_to_remove": []
}
```
{{ json_schema }}
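{# Illustrative sample (not part of the template): a user saying
   "我叫张三,是一名高中物理老师,别再叫我小张了" could yield
   aliases_to_add=["张三"], aliases_to_remove=["小张"], and
   user_metadata.profile.role="高中物理老师" under the rules above. #}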

View File

@@ -112,22 +112,23 @@ class RedBearModelFactory:
params["stream_usage"] = True
# Deep thinking mode
is_streaming = bool(config.extra_params.get("streaming"))
if config.support_thinking:
if is_streaming and not config.is_omni:
if provider == ModelProvider.VOLCANO:
# Volcano deep thinking is only supported on streaming calls; skip the thinking parameter for non-streaming
thinking_config: Dict[str, Any] = {
"type": "enabled" if config.deep_thinking else "disabled"
}
if config.deep_thinking and config.thinking_budget_tokens:
thinking_config["budget_tokens"] = config.thinking_budget_tokens
params["extra_body"] = {"thinking": thinking_config}
else:
# Always pass enable_thinking explicitly; models that do not support it (e.g. DeepSeek-R1) simply ignore it
model_kwargs: Dict[str, Any] = config.extra_params.get("model_kwargs", {})
model_kwargs["enable_thinking"] = config.deep_thinking
if config.deep_thinking and config.thinking_budget_tokens:
model_kwargs["thinking_budget"] = config.thinking_budget_tokens
params["model_kwargs"] = model_kwargs
return params
elif provider == ModelProvider.DASHSCOPE:
params = {
@@ -206,10 +207,15 @@ class RedBearModelFactory:
if provider in [ModelProvider.XINFERENCE, ModelProvider.GPUSTACK]:
return {
"model": config.model_name,
# "base_url": config.base_url,
"jina_api_key": config.api_key,
**config.extra_params
}
elif provider == ModelProvider.DASHSCOPE:
return {
"model": config.model_name,
"dashscope_api_key": config.api_key,
**config.extra_params
}
else:
raise BusinessException(f"不支持的提供商: {provider}", code=BizCode.PROVIDER_NOT_SUPPORTED)
@@ -265,6 +271,9 @@ def get_provider_rerank_class(provider: str):
if provider in [ModelProvider.XINFERENCE, ModelProvider.GPUSTACK]:
from langchain_community.document_compressors import JinaRerank
return JinaRerank
elif provider == ModelProvider.DASHSCOPE:
from langchain_community.document_compressors.dashscope_rerank import DashScopeRerank
return DashScopeRerank
# elif provider == ModelProvider.OLLAMA:
# from langchain_ollama import OllamaEmbeddings
# return OllamaEmbeddings
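# A condensed sketch of the thinking-parameter gating above (attribute names mirror
# RedBearModelConfig as used in the factory; this helper is illustrative only):
def _thinking_params_sketch(config, is_streaming: bool, is_volcano: bool) -> dict:
    params: dict = {}
    if not config.support_thinking:
        return params  # unsupported models get no thinking parameters at all
    if is_streaming and not config.is_omni:
        if is_volcano:
            thinking = {"type": "enabled" if config.deep_thinking else "disabled"}
            if config.deep_thinking and config.thinking_budget_tokens:
                thinking["budget_tokens"] = config.thinking_budget_tokens
            params["extra_body"] = {"thinking": thinking}
        else:
            kwargs = {"enable_thinking": config.deep_thinking}
            if config.deep_thinking and config.thinking_budget_tokens:
                kwargs["thinking_budget"] = config.thinking_budget_tokens
            params["model_kwargs"] = kwargs
    return params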

View File

@@ -36,9 +36,7 @@ class RedBearEmbeddings(Embeddings):
"base_url": config.base_url,
"api_key": config.api_key,
"timeout": httpx.Timeout(timeout=config.timeout, connect=60.0),
"max_retries": config.max_retries,
"check_embedding_ctx_length": False,
"encoding_format": "float"
"max_retries": config.max_retries
}
elif provider == ModelProvider.DASHSCOPE:
params = {

View File

@@ -76,5 +76,9 @@ class RedBearRerank(BaseDocumentCompressor):
from langchain_community.document_compressors import JinaRerank
model_instance: JinaRerank = self._model
return model_instance.rerank(documents=documents, query=query, top_n=top_n)
elif provider == ModelProvider.DASHSCOPE:
from langchain_community.document_compressors.dashscope_rerank import DashScopeRerank
model_instance: DashScopeRerank = self._model
return model_instance.rerank(documents=documents, query=query, top_n=top_n)
else:
raise ValueError(f"不支持的模型提供商: {provider}")

View File

@@ -672,10 +672,15 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
excel_parser = ExcelParser()
if parser_config.get("html4excel") and parser_config.get("html4excel").lower() == "true":
sections = [(_, "") for _ in excel_parser.html(binary, 12) if _]
parser_config["chunk_token_num"] = 0
else:
sections = [(_, "") for _ in excel_parser(binary) if _]
parser_config["chunk_token_num"] = 12800
callback(0.8, "Finish parsing.")
# Each Excel row becomes its own chunk (skipping naive_merge), so rows are not split by the delimiter
chunks = [s for s, _ in sections]
res.extend(tokenize_chunks(chunks, doc, is_english, None))
res.extend(embed_res)
res.extend(url_res)
return res
elif re.search(r"\.(txt|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|sql)$", filename, re.IGNORECASE):
callback(0.1, "Start to parse.")

View File

@@ -232,14 +232,14 @@ class RAGExcelParser:
t = str(ti[i].value) if i < len(ti) else ""
t += ("" if t else "") + str(c.value)
fields.append(t)
line = "; ".join(fields)
line = "\n".join(fields)
if sheetname.lower().find("sheet") < 0:
line += " ——" + sheetname
line += "\n——" + sheetname
res.append(line)
else:
# 只有表头的情况
if header_fields:
line = "; ".join(header_fields)
line = "\n".join(header_fields)
if sheetname.lower().find("sheet") < 0:
line += " ——" + sheetname
res.append(line)
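# Resulting shape (illustrative): a two-column row in a sheet named "员工表" now renders as
#     "姓名:张三\n年龄:30\n——员工表"
# i.e. one field per line with the sheet name on its own line, instead of "; "-joined text.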

View File

@@ -292,9 +292,10 @@ class MinerUParser(RAGPdfParser):
self.page_from = page_from
self.page_to = page_to
try:
with pdfplumber.open(fnm) if isinstance(fnm, (str, PathLike)) else pdfplumber.open(BytesIO(fnm)) as pdf:
self.pdf = pdf
self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).original for _, p in enumerate(self.pdf.pages[page_from:page_to])]
with sys.modules[LOCK_KEY_pdfplumber]:  # acquire the global pdfplumber lock (added line)
with pdfplumber.open(fnm) if isinstance(fnm, (str, PathLike)) else pdfplumber.open(BytesIO(fnm)) as pdf:
self.pdf = pdf
self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).original for _, p in enumerate(self.pdf.pages[page_from:page_to])]
except Exception as e:
self.page_images = None
self.total_page = 0
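# Context for the lock above (a sketch; the actual registration lives elsewhere in the
# codebase): a process-wide lock object is stashed in sys.modules under
# LOCK_KEY_pdfplumber and entered around pdfplumber.open(), presumably because
# concurrent pdfplumber/pdfminer parsing is not safe:
#     import sys, threading
#     sys.modules.setdefault(LOCK_KEY_pdfplumber, threading.Lock())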

View File

@@ -50,7 +50,9 @@ class OpenAIEmbed(Base):
def encode(self, texts: list):
# OpenAI requires batch size <=16
batch_size = 16
# Use 8000 instead of 8191 to leave a safety margin for tokenizer differences
# between cl100k_base (used by truncate) and the actual embedding model
texts = [truncate(t, 8000) for t in texts]
ress = []
total_tokens = 0
for i in range(0, len(texts), batch_size):
@@ -63,7 +65,7 @@ class OpenAIEmbed(Base):
return np.array(ress), total_tokens
def encode_queries(self, text):
res = self.client.embeddings.create(input=[truncate(text, 8000)], model=self.model_name, encoding_format="float", extra_body={"drop_params": True})
return np.array(res.data[0].embedding), self.total_token_count(res)
@@ -79,6 +81,7 @@ class LocalAIEmbed(Base):
def encode(self, texts: list):
batch_size = 16
texts = [truncate(t, 8000) for t in texts]
ress = []
for i in range(0, len(texts), batch_size):
res = self.client.embeddings.create(input=texts[i : i + batch_size], model=self.model_name)
@@ -173,6 +176,7 @@ class XinferenceEmbed(Base):
def encode(self, texts: list):
batch_size = 16
texts = [truncate(t, 8000) for t in texts]
ress = []
total_tokens = 0
for i in range(0, len(texts), batch_size):
@@ -188,7 +192,7 @@ class XinferenceEmbed(Base):
def encode_queries(self, text):
res = None
try:
res = self.client.embeddings.create(input=[truncate(text, 8000)], model=self.model_name)
return np.array(res.data[0].embedding), self.total_token_count(res)
except Exception as _e:
log_exception(_e, res)

View File

@@ -28,6 +28,7 @@ from app.core.rag.common.float_utils import get_float
from app.core.rag.common.constants import PAGERANK_FLD, TAG_FLD
from app.core.rag.llm.chat_model import Base
from app.core.rag.llm.embedding_model import OpenAIEmbed
from app.services.model_service import ModelApiKeyService
import logging
logger = logging.getLogger(__name__)
@@ -114,9 +115,8 @@ def knowledge_retrieval(
# Use the specified reranker for re-ranking
if reranker_id:
try:
all_results = rerank(db=db, reranker_id=reranker_id, query=query, docs=all_results, top_k=reranker_top_k)
except Exception as rerank_error:
# If reranker fails, log warning and continue with original results
logger.warning(
"Reranker failed, falling back to original results",
extra={
@@ -132,7 +132,10 @@ def knowledge_retrieval(
from app.core.rag.common.settings import kg_retriever
doc = kg_retriever.retrieval(question=query, workspace_ids=workspace_ids, kb_ids=kb_ids, emb_mdl=embedding_model, llm=chat_model)
if doc:
all_results.insert(0, DocumentChunk(
    page_content=doc.get("page_content", ""),
    metadata=doc.get("metadata", {})
))
except Exception as graph_error:
print(f"Failed to retrieve from knowledge graph: {str(graph_error)}")
@@ -198,16 +201,18 @@ def _retrieve_for_knowledge(
workspace_ids.append(str(db_knowledge.workspace_id))
if not chat_model:
llm_key = ModelApiKeyService.get_available_api_key(db, db_knowledge.llm_id)
chat_model = Base(
key=llm_key.api_key,
model_name=llm_key.model_name,
base_url=llm_key.api_base,
)
if not embedding_model:
emb_key = ModelApiKeyService.get_available_api_key(db, db_knowledge.embedding_id)
embedding_model = OpenAIEmbed(
key=emb_key.api_key,
model_name=emb_key.model_name,
base_url=emb_key.api_base,
)
vector_service = ElasticSearchVectorFactory().init_vector(knowledge=db_knowledge)
@@ -248,6 +253,29 @@ def _retrieve_for_knowledge(
seen_ids.add(doc.metadata["doc_id"])
unique_rs.append(doc)
rs = unique_rs
if unique_rs:
rs = vector_service.rerank(
query=kb_config["query"],
docs=unique_rs,
top_k=kb_config["top_k"]
)
if kb_config["retrieve_type"] == "graph":
try:
from app.core.rag.common.settings import kg_retriever
graph_doc = kg_retriever.retrieval(
question=kb_config["query"],
workspace_ids=[str(db_knowledge.workspace_id)],
kb_ids=[str(db_knowledge.id)],
emb_mdl=embedding_model,
llm=chat_model,
)
if graph_doc:
rs.insert(0, DocumentChunk(
page_content=graph_doc.get("page_content", ""),
metadata=graph_doc.get("metadata", {})
))
except Exception as graph_error:
logger.warning(f"Graph retrieval failed for kb {db_knowledge.id}: {graph_error}")
results.extend(rs)
return results, chat_model, embedding_model

View File

@@ -230,7 +230,7 @@ class DateTimeTool(BuiltinTool):
@staticmethod
def _datetime_to_timestamp(kwargs) -> dict:
"""日期时间转时间戳"""
input_value = kwargs.get("input_value")
input_value = kwargs.get("input_value").strip()
input_format = kwargs.get("input_format", "%Y-%m-%d %H:%M:%S")
timezone_str = kwargs.get("from_timezone", "Asia/Shanghai")
@@ -253,9 +253,9 @@ class DateTimeTool(BuiltinTool):
return {
"datetime": input_value,
"timezone": timezone_str,
"timestamp": int(dt.timestamp()),
"timestamp": int(dt.timestamp()) * 1000,
"iso_format": dt.isoformat(),
"result_data": int(dt.timestamp())
"result_data": int(dt.timestamp()) * 1000
}
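# Note on units (illustrative): int(dt.timestamp()) yields epoch seconds; multiplying
# by 1000 returns epoch milliseconds, matching the JavaScript Date.now() convention
# the callers presumably expect:
#     int(datetime(2026, 4, 15, 12, 0).timestamp()) * 1000  # -> milliseconds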
def _calculate_datetime(self, kwargs) -> dict:

View File

@@ -0,0 +1,300 @@
"""OpenClaw 远程 Agent 内置工具"""
import time
import base64
from io import BytesIO
from typing import List, Dict, Any, Optional
import aiohttp
from app.core.tools.builtin.base import BuiltinTool
from app.schemas.tool_schema import ToolParameter, ToolResult, ParameterType
from app.core.logging_config import get_business_logger
logger = get_business_logger()
class OpenClawTool(BuiltinTool):
"""OpenClaw 远程 Agent 工具 — 支持文本和图片多模态输入"""
def __init__(self, tool_id: str, config: Dict[str, Any]):
super().__init__(tool_id, config)
params = self.parameters_config
# User-configured fields (filled in via the frontend form)
self._server_url = params.get("server_url", "")
self._api_key = params.get("api_key", "")
self._agent_id = params.get("agent_id", "main")
# Internal defaults
self._model = "openclaw"
self._session_strategy = "by_user"
self._timeout = 120
# Runtime context (injected via set_runtime_context)
self._user_id = "anonymous"
self._conversation_id = None
self._uploaded_files = []
@property
def name(self) -> str:
return "openclaw_tool"
@property
def description(self) -> str:
return (
"OpenClaw 远程 Agent将任务委托给远程 OpenClaw Agent。"
"具备 3D 模型生成与打印控制、设备管理、文件处理、浏览器自动化、"
"Shell 命令执行、网络搜索等能力。支持文本和图片多模态交互。"
)
def get_required_config_parameters(self) -> List[str]:
return ["server_url", "api_key"]
@property
def parameters(self) -> List[ToolParameter]:
return [
ToolParameter(
name="operation",
type=ParameterType.STRING,
description="任务类型",
required=True,
enum= ["print_task", "device_query", "image_understand", "general"]
),
ToolParameter(
name="message",
type=ParameterType.STRING,
description="发送给 OpenClaw Agent 的文本请求内容",
required=True
),
ToolParameter(
name="image_url",
type=ParameterType.STRING,
description="可选,附带的图片 URL 或 base64 data URIOpenClaw 支持图片输入)",
required=False
)
]
# ---------- Runtime context injection ----------
def set_runtime_context(
self,
user_id: str = "anonymous",
conversation_id: Optional[str] = None,
uploaded_files: Optional[list] = None
):
"""注入运行时上下文(由 chat service 调用)"""
self._user_id = user_id
self._conversation_id = conversation_id
self._uploaded_files = uploaded_files or []
# ---------- Connection test ----------
async def test_connection(self) -> Dict[str, Any]:
"""测试 OpenClaw Gateway 连接"""
if not self._server_url:
return {"success": False, "message": "未配置 server_url"}
if not self._api_key:
return {"success": False, "message": "未配置 api_key"}
url = f"{self._server_url.rstrip('/')}/v1/responses"
headers = {
"Authorization": f"Bearer {self._api_key}",
"Content-Type": "application/json",
"x-openclaw-agent-id": self._agent_id
}
body = {
"model": self._model,
"user": "connection-test",
"input": "hi",
"stream": False
}
try:
timeout_cfg = aiohttp.ClientTimeout(total=30)
async with aiohttp.ClientSession(timeout=timeout_cfg) as session:
async with session.post(url, json=body, headers=headers) as resp:
if resp.status < 400:
return {"success": True, "message": "OpenClaw 连接成功"}
error_text = await resp.text()
return {
"success": False,
"message": f"OpenClaw HTTP {resp.status}: {error_text[:200]}"
}
except Exception as e:
return {"success": False, "message": f"OpenClaw 连接失败: {str(e)}"}
# ---------- Execution ----------
async def execute(self, **kwargs) -> ToolResult:
"""执行 OpenClaw 调用"""
start_time = time.time()
try:
message = kwargs.get("message", "")
if not message:
return ToolResult.error_result(
error="message 参数不能为空",
error_code="OPENCLAW_INVALID_INPUT",
execution_time=time.time() - start_time
)
# Extract the image: prefer user-uploaded files, falling back to the image_url passed by the LLM
image_url = self._extract_image_from_uploads()
if not image_url:
image_url = kwargs.get("image_url")
if image_url and not image_url.startswith("data:"):
image_url = await self._download_and_encode_image(image_url)
# Build the request
url = f"{self._server_url.rstrip('/')}/v1/responses"
headers = {
"Authorization": f"Bearer {self._api_key}",
"Content-Type": "application/json",
"x-openclaw-agent-id": self._agent_id
}
user_field = (
f"conv-{self._conversation_id}"
if self._session_strategy == "by_conversation" and self._conversation_id
else f"user-{self._user_id}"
)
input_field = self._build_input(message, image_url)
body = {
"model": self._model,
"user": user_field,
"input": input_field,
"stream": False
}
timeout_cfg = aiohttp.ClientTimeout(total=self._timeout)
# Log the request (truncate base64 to keep logs small)
log_body = {**body}
if isinstance(log_body.get("input"), list):
log_body["input"] = "[multimodal input, truncated]"
elif isinstance(log_body.get("input"), str) and len(log_body["input"]) > 500:
log_body["input"] = log_body["input"][:500] + "..."
logger.info(
f"OpenClaw 请求: url={url}, agent_id={self._agent_id}, "
f"has_image={bool(image_url)}, body={log_body}"
)
async with aiohttp.ClientSession(timeout=timeout_cfg) as session:
async with session.post(url, json=body, headers=headers) as resp:
execution_time = time.time() - start_time
if resp.status >= 400:
error_text = await resp.text()
return ToolResult.error_result(
error=f"OpenClaw HTTP {resp.status}: {error_text[:500]}",
error_code="OPENCLAW_HTTP_ERROR",
execution_time=execution_time
)
data = await resp.json()
text = self._extract_response(data)
display_text = self._format_result(text)
return ToolResult.success_result(
data=display_text,
execution_time=execution_time
)
except aiohttp.ClientError as e:
return ToolResult.error_result(
error=f"OpenClaw 网络连接失败: {str(e)}",
error_code="OPENCLAW_NETWORK_ERROR",
execution_time=time.time() - start_time
)
except Exception as e:
return ToolResult.error_result(
error=f"OpenClaw 调用失败: {str(e)}",
error_code="OPENCLAW_EXECUTION_ERROR",
execution_time=time.time() - start_time
)
# ---------- Private helpers ----------
def _extract_image_from_uploads(self) -> Optional[str]:
"""从用户上传文件中提取图片 URL"""
for f in self._uploaded_files:
f_type = f.get("type", "")
if f_type == "image":
source = f.get("source", {})
if source.get("type") == "base64":
media_type = source.get("media_type", "image/jpeg")
data = source.get("data", "")
return f"data:{media_type};base64,{data}"
elif f.get("image"):
return f.get("image")
elif f.get("url"):
return f.get("url")
elif f_type == "image_url":
return f.get("image_url", {}).get("url", "")
return None
async def _download_and_encode_image(self, image_url: str) -> str:
"""下载图片并转为 base64 data URI"""
try:
from PIL import Image
MAX_RAW_SIZE = 4 * 1024 * 1024
async with aiohttp.ClientSession() as session:
async with session.get(
image_url, allow_redirects=True,
timeout=aiohttp.ClientTimeout(total=30)
) as resp:
if resp.status != 200:
return image_url
content_type = resp.headers.get("Content-Type", "image/jpeg")
if not content_type.startswith("image/"):
return image_url
img_bytes = await resp.read()
if len(img_bytes) > MAX_RAW_SIZE:
img = Image.open(BytesIO(img_bytes))
if img.mode in ("RGBA", "P", "LA"):
img = img.convert("RGB")
if max(img.size) > 2048:
img.thumbnail((2048, 2048), Image.LANCZOS)
buf = BytesIO()
img.save(buf, format="JPEG", quality=75, optimize=True)
img_bytes = buf.getvalue()
content_type = "image/jpeg"
b64 = base64.b64encode(img_bytes).decode("utf-8")
return f"data:{content_type};base64,{b64}"
except Exception as e:
logger.warning(f"OpenClaw 下载图片失败,使用原始 URL: {e}")
return image_url
def _build_input(self, message: str, image_url: Optional[str] = None):
"""构造请求 input 字段:有图片则构造多模态结构,否则纯文本"""
if not image_url:
return message
content_parts = [{"type": "input_text", "text": message}]
if image_url.startswith("data:"):
try:
header, data = image_url.split(",", 1)
media_type = header.split(":")[1].split(";")[0]
content_parts.append({
"type": "input_image",
"source": {"type": "base64", "media_type": media_type, "data": data}
})
except (ValueError, IndexError):
return message
else:
content_parts.append({
"type": "input_image",
"source": {"type": "url", "url": image_url}
})
return [{"type": "message", "role": "user", "content": content_parts}]
def _extract_response(self, response_data: Dict[str, Any]) -> str:
"""从 OpenClaw 响应中提取文本内容
OpenClaw /v1/responses 只返回 output_text 类型的内容。
图片信息(如有)由 OpenClaw Skill 以 Markdown 链接形式嵌入文本中返回。
"""
output = response_data.get("output", [])
texts = []
for item in output:
if item.get("type") == "message":
for content in item.get("content", []):
if content.get("type") == "output_text" and content.get("text"):
texts.append(content["text"])
return "\n".join(texts) if texts else str(response_data)
@staticmethod
def _format_result(text: str) -> str:
"""格式化结果为 LLM 可读字符串"""
return text or "OpenClaw 返回了空内容)"
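
A minimal usage sketch for the class above (not part of the diff). It assumes the BuiltinTool base class surfaces config["parameters"] as self.parameters_config; the gateway URL and API key are placeholders.

import asyncio

async def demo():
    # Hypothetical wiring; the config keys mirror the class above.
    tool = OpenClawTool("openclaw-demo", {
        "parameters": {
            "server_url": "https://openclaw.example.com",
            "api_key": "sk-placeholder",
            "agent_id": "main",
        }
    })
    tool.set_runtime_context(user_id="u-1", conversation_id="c-1", uploaded_files=[])
    print(await tool.test_connection())
    result = await tool.execute(operation="general", message="List available devices")
    print(result)

asyncio.run(demo())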

View File

@@ -11,6 +11,11 @@ class OperationTool(BaseTool):
self.base_tool = base_tool
self.operation = operation
super().__init__(base_tool.tool_id, base_tool.config)
def set_runtime_context(self, **kwargs):
"""转发运行时上下文到 base_tool"""
if hasattr(self.base_tool, 'set_runtime_context'):
self.base_tool.set_runtime_context(**kwargs)
@property
def name(self) -> str:
@@ -32,6 +37,8 @@ class OperationTool(BaseTool):
return self._get_datetime_params()
elif self.base_tool.name == 'json_tool':
return self._get_json_params()
elif self.base_tool.name == 'openclaw_tool':
return self._get_openclaw_params()
else:
# By default, return all parameters except "operation"
return [p for p in self.base_tool.parameters if p.name != "operation"]
@@ -138,6 +145,29 @@ class OperationTool(BaseTool):
default="Asia/Shanghai"
)
]
elif self.operation == "datetime_to_timestamp":
return [
ToolParameter(
name="input_value",
type=ParameterType.STRING,
description="输入值时间字符串2026-04-07 10:30:25",
required=True
),
ToolParameter(
name="input_format",
type=ParameterType.STRING,
description="输入时间格式(如:%Y-%m-%d %H:%M:%S",
required=False,
default="%Y-%m-%d %H:%M:%S"
),
ToolParameter(
name="from_timezone",
type=ParameterType.STRING,
description="源时区UTC, Asia/Shanghai",
required=False,
default="Asia/Shanghai"
)
]
else:
return []
@@ -209,6 +239,64 @@ class OperationTool(BaseTool):
else:
return base_params
def _get_openclaw_params(self) -> List[ToolParameter]:
"""获取 openclaw_tool 特定操作的参数"""
if self.operation == "print_task":
return [
ToolParameter(
name="message",
type=ParameterType.STRING,
description="发送给 OpenClaw 的打印任务描述,将用户的原始消息原封不动地传递给 OpenClaw禁止改写、补充或润色用户的原文",
required=True
),
ToolParameter(
name="image_url",
type=ParameterType.STRING,
description="可选附带的设计图片或参考图OpenClaw 可据此生成 3D 模型",
required=False
)
]
elif self.operation == "device_query":
return [
ToolParameter(
name="message",
type=ParameterType.STRING,
description="发送给 OpenClaw 的设备查询指令",
required=True
)
]
elif self.operation == "image_understand":
return [
ToolParameter(
name="message",
type=ParameterType.STRING,
description="发送给 OpenClaw 的图片理解任务,应描述需要对图片做什么(如描述内容、提取文字、分析信息)",
required=True
),
ToolParameter(
name="image_url",
type=ParameterType.STRING,
description="要分析的图片 URL 或 base64 data URI",
required=False
)
]
else:
# "general" and all other operations
return [
ToolParameter(
name="message",
type=ParameterType.STRING,
description="发送给 OpenClaw Agent 的任务描述,应包含完整的任务需求",
required=True
),
ToolParameter(
name="image_url",
type=ParameterType.STRING,
description="可选,附带的图片 URL 或 base64 data URI",
required=False
)
]
async def execute(self, **kwargs) -> ToolResult:
"""执行特定操作"""
# 添加operation参数

View File

@@ -0,0 +1,15 @@
{
"name": "openclaw_tool",
"description": "调用OpenClaw Agent远程服务",
"tool_class": "OpenClawTool",
"category": "agent",
"requires_config": true,
"version": "1.0.0",
"enabled": true,
"parameters": {
"server_url": "",
"api_key": "",
"agent_id": "main"
},
"tags": ["agent", "openclaw", "multimodal", "3d-printing", "builtin"]
}

View File

@@ -30,5 +30,18 @@
"parameters": {
"api_key": {"type": "string", "description": "百度搜索API密钥", "sensitive": true, "required": true}
}
},
"openclaw": {
"name": "OpenClaw远程Agent",
"description": "OpenClaw Agent远程服务",
"tool_class": "OpenClawTool",
"category": "agent",
"requires_config": true,
"version": "1.0.0",
"enabled": true,
"parameters": {
"server_url": {"type": "string", "description": "OpenClaw Gateway 地址", "required": true},
"api_key": {"type": "string", "description": "OpenClaw API Key", "sensitive": true, "required": true}
}
}
}

View File

@@ -30,7 +30,7 @@ class CustomTool(BaseTool):
self.auth_config = config.get("auth_config", {})
self.base_url = config.get("base_url", "")
self.timeout = config.get("timeout", 30)
# Parse the OpenAPI schema
self._parsed_operations = self._parse_openapi_schema()

View File

@@ -131,7 +131,7 @@ class LangchainAdapter:
def _tool_supports_operations(tool: BaseTool) -> bool:
"""检查工具是否支持多操作"""
# 内置工具中支持操作的工具
builtin_operation_tools = ['datetime_tool', 'json_tool']
builtin_operation_tools = ['datetime_tool', 'json_tool', 'openclaw_tool']
# Check builtin tools
if tool.tool_type.value == "builtin" and tool.name in builtin_operation_tools:

View File

@@ -40,6 +40,7 @@ class WorkflowParserResult(BaseModel):
edges: list[EdgeDefinition] = Field(default_factory=list)
nodes: list[NodeDefinition] = Field(default_factory=list)
variables: list[VariableDefinition] = Field(default_factory=list)
features: dict[str, Any] = Field(default_factory=dict)
warnings: list[ExceptionDefinition] = Field(default_factory=list)
errors: list[ExceptionDefinition] = Field(default_factory=list)
@@ -51,6 +52,7 @@ class WorkflowImportResult(BaseModel):
edges: list[EdgeDefinition] = Field(default_factory=list)
nodes: list[NodeDefinition] = Field(default_factory=list)
variables: list[VariableDefinition] = Field(default_factory=list)
features: dict[str, Any] = Field(default_factory=dict)
warnings: list[ExceptionDefinition] = Field(default_factory=list)
errors: list[ExceptionDefinition] = Field(default_factory=list)

View File

@@ -15,7 +15,7 @@ from app.core.workflow.adapters.errors import (
ExceptionType
)
from app.core.workflow.nodes.assigner.config import AssignmentItem
from app.core.workflow.nodes.base_config import VariableDefinition, BaseNodeConfig
from app.core.workflow.nodes.base_config import VariableDefinition as NodeVariableDefinition, BaseNodeConfig
from app.core.workflow.nodes.code.config import InputVariable, OutputVariable
from app.core.workflow.nodes.configs import (
StartNodeConfig,
@@ -36,6 +36,7 @@ from app.core.workflow.nodes.configs import (
ListOperatorNodeConfig,
DocExtractorNodeConfig,
)
from app.schemas.workflow_schema import VariableDefinition as SchemaVariableDefinition
from app.core.workflow.nodes.cycle_graph.config import (
ConditionDetail as LoopConditionDetail,
ConditionsConfig,
@@ -98,6 +99,7 @@ class DifyConverter(BaseConverter):
NodeType.CYCLE_START: lambda x: {},
NodeType.BREAK: lambda x: {},
}
self._file_vars_to_conv: list[SchemaVariableDefinition] = []
def get_node_convert(self, node_type):
func = self.CONFIG_CONVERT_MAP.get(node_type, lambda x: {})
@@ -286,19 +288,25 @@ class DifyConverter(BaseConverter):
)
continue
if var_type in ["file", "array[file]"]:
self.errors.append(
ExceptionDefinition(
type=ExceptionType.VARIABLE,
node_id=node["id"],
node_name=node_data["title"],
name=var["variable"],
detail=f"Unsupported Variable type for start node: {var_type}"
)
)
if var_type in [VariableType.FILE, VariableType.ARRAY_FILE]:
# Start nodes do not support file variables; convert them to conversation variables
self._file_vars_to_conv.append(SchemaVariableDefinition(
name=var["variable"],
type=var_type.value,
required=var.get("required", False),
default=None,
description=var.get("label", ""),
))
self.warnings.append(ExceptionDefinition(
type=ExceptionType.VARIABLE,
node_id=node["id"],
node_name=node_data["title"],
name=var["variable"],
detail=f"File variable '{var['variable']}' is not supported in start node, moved to conversation variables"
))
continue
var_def = VariableDefinition(
var_def = NodeVariableDefinition(
name=var["variable"],
type=var_type,
required=var["required"],
@@ -837,3 +845,76 @@ class DifyConverter(BaseConverter):
).model_dump()
self.config_validate(node["id"], node["data"]["title"], DocExtractorNodeConfig, result)
return result
@staticmethod
def convert_features(features: dict) -> dict:
"""Convert Dify features to MemoryBear FeaturesConfigForm format."""
if not features:
return {}
result: dict = {}
# opening_statement
opening = features.get("opening_statement", "")
suggested = features.get("suggested_questions", [])
result["opening_statement"] = {
"enabled": bool(opening),
"statement": opening or None,
"suggested_questions": suggested,
}
# citation (maps to Dify's retriever_resource)
retriever = features.get("retriever_resource", {})
result["citation"] = {
"enabled": retriever.get("enabled", False) if isinstance(retriever, dict) else False,
}
# file_upload: Dify allowed_file_types array -> flat frontend fields
file_upload = features.get("file_upload", {})
allowed_types = file_upload.get("allowed_file_types", []) if file_upload else []
allowed_methods = file_upload.get("allowed_file_upload_methods", ["local_file", "remote_url"])
if isinstance(allowed_methods, list):
if len(allowed_methods) >= 2:
transfer_method = "both"
elif allowed_methods:
transfer_method = allowed_methods[0]
else:
transfer_method = "both"
else:
transfer_method = allowed_methods or "both"
file_config = file_upload.get("fileUploadConfig", {})
result["file_upload"] = {
"enabled": file_upload.get("enabled", False) if file_upload else False,
"image_enabled": "image" in allowed_types,
"image_max_size_mb": file_config.get("image_file_size_limit", 10) if file_config else 10,
"image_allowed_extensions": ["png", "jpg", "jpeg"],
"audio_enabled": "audio" in allowed_types,
"audio_max_size_mb": file_config.get("audio_file_size_limit", 50) if file_config else 50,
"audio_allowed_extensions": ["mp3", "wav", "m4a"],
"document_enabled": "document" in allowed_types,
"document_max_size_mb": file_config.get("file_size_limit", 100) if file_config else 100,
"document_allowed_extensions": ["pdf", "docx", "doc", "xlsx", "xls", "txt", "csv", "json", "md"],
"video_enabled": "video" in allowed_types,
"video_max_size_mb": file_config.get("video_file_size_limit", 100) if file_config else 100,
"video_allowed_extensions": ["mp4", "mov"],
"max_file_count": file_upload.get("number_limits", 1) if file_upload else 1,
"allowed_transfer_methods": transfer_method,
}
# text_to_speech
tts = features.get("text_to_speech", {})
result["text_to_speech"] = {
"enabled": tts.get("enabled", False) if isinstance(tts, dict) else False,
"voice": tts.get("voice") if isinstance(tts, dict) else None,
"language": tts.get("language") if isinstance(tts, dict) else None,
"autoplay": False,
}
# suggested_questions_after_answer
sqa = features.get("suggested_questions_after_answer", {})
result["suggested_questions_after_answer"] = {
"enabled": sqa.get("enabled", False) if isinstance(sqa, dict) else False,
}
return result
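
To make the mapping concrete, a small illustration (values are invented; only convert_features from above is exercised):

dify_features = {
    "opening_statement": "Hi, how can I help?",
    "suggested_questions": ["What can you do?"],
    "retriever_resource": {"enabled": True},
    "file_upload": {
        "enabled": True,
        "allowed_file_types": ["image", "document"],
        "allowed_file_upload_methods": ["local_file"],
        "number_limits": 3,
        "fileUploadConfig": {"image_file_size_limit": 5, "file_size_limit": 20},
    },
    "text_to_speech": {"enabled": False},
    "suggested_questions_after_answer": {"enabled": True},
}

converted = DifyConverter.convert_features(dify_features)
# converted["file_upload"]: image_enabled=True, image_max_size_mb=5,
# document_max_size_mb=20, max_file_count=3, and allowed_transfer_methods
# becomes "local_file" (a single upload method maps to itself, not "both").
assert converted["opening_statement"]["enabled"] is True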

View File

@@ -119,9 +119,12 @@ class DifyAdapter(BasePlatformAdapter, DifyConverter):
if variable:
self.conv_variables.append(con_var)
# for variables in config.get("workflow").get("environment_variables"):
# variable = self._convert_variable(variables)
# conv_variables.append(variable)
# Merge start-node file variables into conversation variables
self.conv_variables.extend(self._file_vars_to_conv)
features = self.convert_features(
self.config.get("workflow", {}).get("features", {})
)
trigger = self._convert_trigger({})
execution_config = self._convert_execution({})
@@ -135,6 +138,7 @@ class DifyAdapter(BasePlatformAdapter, DifyConverter):
edges=self.edges,
nodes=self.nodes,
variables=self.conv_variables,
features=features,
warnings=self.warnings,
errors=self.errors
)

View File

@@ -31,9 +31,9 @@ logger = logging.getLogger(__name__)
# Example:
# "Hello {{user.name}}!" ->
# ["Hello ", "{{user.name}}", "!"]
_OUTPUT_PATTERN = re.compile(r'\{\{.*?}}|[^{}]+')
_OUTPUT_PATTERN = re.compile(r'\{\{.*?}}|[^{]+|{')
# Strict variable format: {{ node_id.field_name }}
_VARIABLE_PATTERN = re.compile(r'\{\{\s*[a-zA-Z0-9_]+\.[a-zA-Z0-9_]+\s*}}')
_VARIABLE_PATTERN = re.compile(r'\{\{\s*[a-zA-Z0-9_]+\.[a-zA-Z0-9_]+(?:\.[a-zA-Z0-9_]+)?\s*}}')
class GraphBuilder:
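
A standalone check of what the widened patterns accept (a sketch mirroring the two regexes above):

import re

_OUTPUT_PATTERN = re.compile(r'\{\{.*?}}|[^{]+|{')
_VARIABLE_PATTERN = re.compile(r'\{\{\s*[a-zA-Z0-9_]+\.[a-zA-Z0-9_]+(?:\.[a-zA-Z0-9_]+)?\s*}}')

# The old output alternative [^{}]+ silently dropped lone braces; now every
# character of the template survives the split.
assert "".join(_OUTPUT_PATTERN.findall("a {b} {{n.f}}")) == "a {b} {{n.f}}"

# Two-segment selectors still match, and one extra field segment now matches too.
assert _VARIABLE_PATTERN.fullmatch("{{ node_1.output }}")
assert _VARIABLE_PATTERN.fullmatch("{{ node_1.output.type }}")
assert not _VARIABLE_PATTERN.fullmatch("{{ node_1.output.type.extra }}")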

View File

@@ -14,7 +14,7 @@ from app.core.workflow.engine.variable_pool import VariablePool
logger = get_logger(__name__)
SCOPE_PATTERN = re.compile(
r"\{\{\s*([a-zA-Z0-9_]+)\.[a-zA-Z0-9_]+\s*}}"
r"\{\{\s*([a-zA-Z0-9_]+)\.[a-zA-Z0-9_]+(?:\.[a-zA-Z0-9_]+)?\s*}}"
)

View File

@@ -34,19 +34,22 @@ class LazyVariableDict:
return self._cache[key]
var_struct = self._source.get(key)
if var_struct is None:
raise KeyError(key)
value = var_struct.instance.to_literal() if self._literal else var_struct.instance.get_value()
return None
raw = var_struct.instance.get_value()
# In literal mode, keep dict/list structure so Jinja2 can still access subfields (e.g. .type)
value = raw if (not self._literal or isinstance(raw, (dict, list))) else var_struct.instance.to_literal()
self._cache[key] = value
return value
def get(self, key, default=None):
try:
return self._resolve(key)
except KeyError:
return default
value = self._resolve(key)
return default if value is None else value
def __getitem__(self, key):
return self._resolve(key)
value = self._resolve(key)
if value is None:
raise KeyError(key)
return value
def __getattr__(self, key):
if key.startswith('_'):
@@ -164,7 +167,7 @@ class VariablePool:
def transform_selector(selector):
variable_literal = VARIABLE_PATTERN.sub(r"\1", selector).strip()
selector = VariableSelector.from_string(variable_literal).path
if len(selector) != 2:
if len(selector) not in (2, 3):
raise ValueError(f"Selector not valid - {selector}")
return selector
@@ -196,6 +199,16 @@ class VariablePool:
return None
return var_instance
@staticmethod
def _extract_field(struct: "VariableStruct", field: str | None) -> Any:
"""If field is given, drill into a dict/object variable's value."""
if field is None:
return struct.instance.get_value()
value = struct.instance.get_value()
if not isinstance(value, dict):
raise KeyError(f"Variable is not an object, cannot access field '{field}'")
return value.get(field)
def get_instance(
self,
selector: str,
@@ -250,12 +263,14 @@ class VariablePool:
Raises:
KeyError: If strict is True and the variable does not exist.
"""
path = self.transform_selector(selector)
variable_struct = self._get_variable_struct(selector)
if variable_struct is None:
if strict:
raise KeyError(f"{selector} not exist")
return default
if len(path) == 3:
return self._extract_field(variable_struct, path[2])
return variable_struct.instance.get_value()
def get_literal(
@@ -282,12 +297,15 @@ class VariablePool:
Raises:
KeyError: If strict is True and the variable does not exist.
"""
path = self.transform_selector(selector)
variable_struct = self._get_variable_struct(selector)
if variable_struct is None:
if strict:
raise KeyError(f"{selector} not exist")
return default
if len(path) == 3:
value = self._extract_field(variable_struct, path[2])
return str(value) if value is not None else ""
return variable_struct.instance.to_literal()
async def set(
@@ -345,7 +363,14 @@ class VariablePool:
Returns:
Whether the variable exists
"""
return self._get_variable_struct(selector) is not None
path = self.transform_selector(selector)
struct = self._get_variable_struct(selector)
if struct is None:
return False
if len(path) == 3:
value = struct.instance.get_value()
return isinstance(value, dict) and path[2] in value
return True
def lazy_namespace(self, namespace: str, literal: bool = False) -> LazyVariableDict:
return LazyVariableDict(self.variables.get(namespace, {}), literal)
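
Illustrative behavior of the three-segment selectors enabled above; a sketch against a live pool (the exact signature of the set call is an assumption):

async def show_field_access(pool: VariablePool) -> None:
    # Assume an earlier node stored an object variable, e.g.
    # await pool.set("node_1", "meta", {"type": "pdf", "size": 1024})
    whole = pool.get("{{ node_1.meta }}")              # the full dict
    field = pool.get("{{ node_1.meta.type }}")         # "pdf", via _extract_field
    text = pool.get_literal("{{ node_1.meta.size }}")  # "1024"; a missing field yields ""
    assert pool.exists("{{ node_1.meta.type }}")       # key-level existence check
    print(whole, field, text)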

View File

@@ -55,9 +55,9 @@ class CycleGraphNode(BaseNode):
if config.output_type in [
VariableType.ARRAY_FILE,
VariableType.ARRAY_STRING,
VariableType.NUMBER,
VariableType.ARRAY_NUMBER,
VariableType.ARRAY_OBJECT,
VariableType.BOOLEAN
VariableType.ARRAY_BOOLEAN
]:
if config.flatten:
outputs['output'] = config.output_type

View File

@@ -72,8 +72,9 @@ class HttpContentTypeConfig(BaseModel):
@classmethod
def validate_data(cls, v, info):
content_type = info.data.get("content_type")
if content_type == HttpContentType.FROM_DATA and not isinstance(v, HttpFormData):
raise ValueError("When content_type is 'form-data', data must be of type HttpFormData")
if content_type == HttpContentType.FROM_DATA and (
not isinstance(v, list) or not all(isinstance(item, HttpFormData) for item in v)):
raise ValueError("When content_type is 'form-data', data must be a list of HttpFormData")
elif content_type in [HttpContentType.JSON] and not isinstance(v, str):
raise ValueError("When content_type is JSON, data must be of type str")
elif content_type in [HttpContentType.WWW_FORM] and not isinstance(v, dict):

View File

@@ -260,17 +260,22 @@ class HttpRequestNode(BaseNode):
))
case HttpContentType.FROM_DATA:
data = {}
content["files"] = {}
files = []
for item in self.typed_config.body.data:
key = self._render_template(item.key, variable_pool)
if item.type == "text":
data[self._render_template(item.key, variable_pool)] = self._render_template(item.value,
variable_pool)
data[key] = self._render_template(item.value, variable_pool)
elif item.type == "file":
content["files"][self._render_template(item.key, variable_pool)] = (
uuid.uuid4().hex,
await variable_pool.get_instance(item.value).get_content()
)
file_instance = variable_pool.get_instance(item.value)
if isinstance(file_instance, ArrayVariable):
for v in file_instance.value:
if isinstance(v, FileVariable):
files.append((key, (uuid.uuid4().hex, await v.get_content())))
elif isinstance(file_instance, FileVariable):
files.append((key, (uuid.uuid4().hex, await file_instance.get_content())))
content["data"] = data
if files:
content["files"] = files
case HttpContentType.BINARY:
content["files"] = []
file_instence = variable_pool.get_instance(self.typed_config.body.data)
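
The switch from a dict to a list of (key, file) tuples matters because multipart allows a field name to repeat, which array[file] variables need. A standalone sketch with httpx (the endpoint is illustrative):

import httpx

data = {"note": "two files under one field name"}
files = [
    ("attachments", ("a.txt", b"first file")),
    ("attachments", ("b.txt", b"second file")),  # same key repeats; a dict cannot express this
]
resp = httpx.post("https://httpbin.org/post", data=data, files=files)
print(resp.json()["files"])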

View File

@@ -8,6 +8,8 @@ from langchain_core.documents import Document
from app.core.error_codes import BizCode
from app.core.exceptions import BusinessException
from app.core.models import RedBearRerank, RedBearModelConfig
from app.core.rag.llm.chat_model import Base
from app.core.rag.llm.embedding_model import OpenAIEmbed
from app.core.rag.models.chunk import DocumentChunk
from app.core.rag.vdb.elasticsearch.elasticsearch_vector import ElasticSearchVectorFactory
from app.core.workflow.engine.state_manager import WorkflowState
@@ -39,8 +41,9 @@ class KnowledgeRetrievalNode(BaseNode):
if isinstance(business_result, dict) and "chunks" in business_result:
return business_result["chunks"]
return business_result
def _extract_citations(self, business_result: Any) -> list:
@staticmethod
def _extract_citations(business_result: Any) -> list:
if isinstance(business_result, dict):
return business_result.get("citations", [])
return []
@@ -230,23 +233,23 @@ class KnowledgeRetrievalNode(BaseNode):
}
)
)
case RetrieveType.HYBRID:
case retrieve_type if retrieve_type in (RetrieveType.HYBRID, RetrieveType.Graph):
rs1_task = asyncio.to_thread(
vector_service.search_by_vector, **{
"query": query,
"top_k": kb_config.top_k,
"indices": indices,
"score_threshold": kb_config.vector_similarity_weight
}
)
vector_service.search_by_vector, **{
"query": query,
"top_k": kb_config.top_k,
"indices": indices,
"score_threshold": kb_config.vector_similarity_weight
}
)
rs2_task = asyncio.to_thread(
vector_service.search_by_full_text, **{
"query": query,
"top_k": kb_config.top_k,
"indices": indices,
"score_threshold": kb_config.similarity_threshold
}
)
vector_service.search_by_full_text, **{
"query": query,
"top_k": kb_config.top_k,
"indices": indices,
"score_threshold": kb_config.similarity_threshold
}
)
rs1, rs2 = await asyncio.gather(rs1_task, rs2_task)
# Deduplicate hybrid retrieval results
@@ -266,6 +269,33 @@ class KnowledgeRetrievalNode(BaseNode):
key=lambda d: d.metadata.get("score", 0),
reverse=True
)[:kb_config.top_k])
if kb_config.retrieve_type == RetrieveType.Graph:
from app.core.rag.common.settings import kg_retriever
llm_key = self.model_balance(db_knowledge.llm)
emb_key = self.model_balance(db_knowledge.embedding)
chat_model = Base(
key=llm_key.api_key,
model_name=llm_key.model_name,
base_url=llm_key.api_base
)
embedding_model = OpenAIEmbed(
key=emb_key.api_key,
model_name=emb_key.model_name,
base_url=emb_key.api_base
)
doc = await asyncio.to_thread(
kg_retriever.retrieval,
question=query,
workspace_ids=[str(db_knowledge.workspace_id)],
kb_ids=[str(kb_config.kb_id)],
emb_mdl=embedding_model,
llm=chat_model
)
if doc:
rs.insert(0, DocumentChunk(
page_content=doc.get("page_content", ""),
metadata=doc.get("metadata", {})
))
case _:
raise RuntimeError("Unknown retrieval type")
return rs

View File

@@ -84,7 +84,7 @@ class FileVariable(BaseVariable):
total_bytes = 0
chunks = []
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(follow_redirects=True) as client:
async with client.stream("GET", self.value.url) as resp:
resp.raise_for_status()
async for chunk in resp.aiter_bytes(8192):
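
Why follow_redirects matters here: object stores commonly answer a GET with a 302 to a signed URL, and without redirect-following the stream reads the redirect response instead of the file. A self-contained sketch of the same download pattern:

import asyncio
import httpx

async def fetch(url: str) -> bytes:
    async with httpx.AsyncClient(follow_redirects=True) as client:
        async with client.stream("GET", url) as resp:
            resp.raise_for_status()
            chunks = [chunk async for chunk in resp.aiter_bytes(8192)]
    return b"".join(chunks)

# e.g. https://httpbin.org/redirect-to?url=... would fail without follow_redirects=True
print(len(asyncio.run(fetch("https://httpbin.org/bytes/1024"))))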

View File

@@ -61,3 +61,15 @@ def get_apps_by_id(db: Session, app_id: uuid.UUID) -> App:
"""根据工作空间ID查询应用"""
repo = AppRepository(db)
return repo.get_apps_by_id(app_id)
def get_release_by_id(db: Session, app_id: uuid.UUID, release_id: uuid.UUID):
"""根据发布版本ID查询发布快照仅返回激活状态"""
from app.models.app_release_model import AppRelease
return db.scalars(
select(AppRelease).where(
AppRelease.app_id == app_id,
AppRelease.id == release_id,
AppRelease.is_active.is_(True),
)
).first()

View File

@@ -5,16 +5,9 @@ Implicit Emotions Storage Repository
Transactions are controlled by the caller; the repository layer uses only flush/refresh.
"""
import logging
from datetime import date, datetime, timezone
from datetime import datetime, timedelta, timezone
from typing import Generator, Optional
class TimeFilterUnavailableError(Exception):
"""redis_client 不可用,无法执行时间轴筛选。
调用方捕获此异常后可选择回退到 get_all_user_ids 进行全量处理。
"""
import redis
from sqlalchemy import exists, not_, select
from sqlalchemy.orm import Session
@@ -25,6 +18,13 @@ from app.models.implicit_emotions_storage_model import ImplicitEmotionsStorage
logger = logging.getLogger(__name__)
class TimeFilterUnavailableError(Exception):
"""redis_client 不可用,无法执行时间轴筛选。
调用方捕获此异常后可选择回退到 get_all_user_ids 进行全量处理。
"""
class ImplicitEmotionsStorageRepository:
"""隐性记忆和情绪存储仓储类"""
@@ -216,9 +216,7 @@ class ImplicitEmotionsStorageRepository:
"""
from sqlalchemy import String as SAString
from sqlalchemy import cast
CST = timezone(timedelta(hours=8))
now_cst = datetime.now(CST)
today_start = now_cst.replace(hour=0, minute=0, second=0, microsecond=0).astimezone(timezone.utc).replace(tzinfo=None)
today_start = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
tomorrow_start = today_start + timedelta(days=1)
offset = 0
while True:

View File

@@ -23,6 +23,7 @@ SET s += {
end_user_id: statement.end_user_id,
stmt_type: statement.stmt_type,
statement: statement.statement,
speaker: statement.speaker,
emotion_intensity: statement.emotion_intensity,
emotion_target: statement.emotion_target,
emotion_subject: statement.emotion_subject,
@@ -56,6 +57,7 @@ SET c += {
expired_at: chunk.expired_at,
dialog_id: chunk.dialog_id,
content: chunk.content,
speaker: chunk.speaker,
chunk_embedding: chunk.chunk_embedding,
sequence_number: chunk.sequence_number,
start_index: chunk.start_index,
@@ -91,6 +93,8 @@ SET e.name = CASE WHEN entity.name IS NOT NULL AND entity.name <> '' THEN entity
END,
e.statement_id = CASE WHEN entity.statement_id IS NOT NULL AND entity.statement_id <> '' THEN entity.statement_id ELSE e.statement_id END,
e.aliases = CASE
// User-entity aliases treat PgSQL end_user_info as the single source of truth; knowledge extraction never writes them
WHEN entity.name IN ['用户', '我', 'User', 'I'] THEN e.aliases
WHEN entity.aliases IS NOT NULL AND size(entity.aliases) > 0
THEN CASE
WHEN e.aliases IS NULL THEN entity.aliases
@@ -283,7 +287,7 @@ LIMIT $limit
"""
SEARCH_STATEMENTS_BY_KEYWORD = """
CALL db.index.fulltext.queryNodes("statementsFulltext", $q) YIELD node AS s, score
CALL db.index.fulltext.queryNodes("statementsFulltext", $query) YIELD node AS s, score
WHERE ($end_user_id IS NULL OR s.end_user_id = $end_user_id)
OPTIONAL MATCH (c:Chunk)-[:CONTAINS]->(s)
OPTIONAL MATCH (s)-[:REFERENCES_ENTITY]->(e:ExtractedEntity)
@@ -307,7 +311,7 @@ LIMIT $limit
"""
# Find entities whose name contains the given string
SEARCH_ENTITIES_BY_NAME = """
CALL db.index.fulltext.queryNodes("entitiesFulltext", $q) YIELD node AS e, score
CALL db.index.fulltext.queryNodes("entitiesFulltext", $query) YIELD node AS e, score
WHERE ($end_user_id IS NULL OR e.end_user_id = $end_user_id)
OPTIONAL MATCH (s:Statement)-[:REFERENCES_ENTITY]->(e)
OPTIONAL MATCH (c:Chunk)-[:CONTAINS]->(s)
@@ -337,21 +341,21 @@ LIMIT $limit
"""
SEARCH_ENTITIES_BY_NAME_OR_ALIAS = """
CALL db.index.fulltext.queryNodes("entitiesFulltext", $q) YIELD node AS e, score
CALL db.index.fulltext.queryNodes("entitiesFulltext", $query) YIELD node AS e, score
WHERE ($end_user_id IS NULL OR e.end_user_id = $end_user_id)
WITH e, score
WITH collect({entity: e, score: score}) AS fulltextResults
With collect({entity: e, score: score}) AS fulltextResults
OPTIONAL MATCH (ae:ExtractedEntity)
WHERE ($end_user_id IS NULL OR ae.end_user_id = $end_user_id)
AND ae.aliases IS NOT NULL
AND ANY(alias IN ae.aliases WHERE toLower(alias) CONTAINS toLower($q))
AND ANY(alias IN ae.aliases WHERE toLower(alias) CONTAINS toLower($query))
WITH fulltextResults, collect(ae) AS aliasEntities
UNWIND (fulltextResults + [x IN aliasEntities | {entity: x, score:
CASE
WHEN ANY(alias IN x.aliases WHERE toLower(alias) = toLower($q)) THEN 1.0
WHEN ANY(alias IN x.aliases WHERE toLower(alias) STARTS WITH toLower($q)) THEN 0.9
WHEN ANY(alias IN x.aliases WHERE toLower(alias) = toLower($query)) THEN 1.0
WHEN ANY(alias IN x.aliases WHERE toLower(alias) STARTS WITH toLower($query)) THEN 0.9
ELSE 0.8
END
}]) AS row
@@ -384,7 +388,7 @@ LIMIT $limit
SEARCH_CHUNKS_BY_CONTENT = """
CALL db.index.fulltext.queryNodes("chunksFulltext", $q) YIELD node AS c, score
CALL db.index.fulltext.queryNodes("chunksFulltext", $query) YIELD node AS c, score
WHERE ($end_user_id IS NULL OR c.end_user_id = $end_user_id)
OPTIONAL MATCH (c)-[:CONTAINS]->(s:Statement)
OPTIONAL MATCH (s)-[:REFERENCES_ENTITY]->(e:ExtractedEntity)
@@ -501,7 +505,7 @@ LIMIT $limit
"""
SEARCH_STATEMENTS_BY_KEYWORD_TEMPORAL = """
CALL db.index.fulltext.queryNodes("statementsFulltext", $q) YIELD node AS s, score
CALL db.index.fulltext.queryNodes("statementsFulltext", $query) YIELD node AS s, score
WHERE ($end_user_id IS NULL OR s.end_user_id = $end_user_id)
AND ((($start_date IS NULL OR (s.created_at IS NOT NULL AND datetime(s.created_at) >= datetime($start_date)))
AND ($end_date IS NULL OR (s.created_at IS NOT NULL AND datetime(s.created_at) <= datetime($end_date))))
@@ -677,7 +681,7 @@ SET n.invalid_at = $new_invalid_at
# MemorySummary keyword search using fulltext index
SEARCH_MEMORY_SUMMARIES_BY_KEYWORD = """
CALL db.index.fulltext.queryNodes("summariesFulltext", $q) YIELD node AS m, score
CALL db.index.fulltext.queryNodes("summariesFulltext", $query) YIELD node AS m, score
WHERE ($end_user_id IS NULL OR m.end_user_id = $end_user_id)
OPTIONAL MATCH (m)-[:DERIVED_FROM_STATEMENT]->(s:Statement)
RETURN m.id AS id,
@@ -1363,7 +1367,7 @@ RETURN c.community_id AS community_id
# Community keyword search: matches name or summary via fulltext index
SEARCH_COMMUNITIES_BY_KEYWORD = """
CALL db.index.fulltext.queryNodes("communitiesFulltext", $q) YIELD node AS c, score
CALL db.index.fulltext.queryNodes("communitiesFulltext", $query) YIELD node AS c, score
WHERE ($end_user_id IS NULL OR c.end_user_id = $end_user_id)
RETURN c.community_id AS id,
c.name AS name,
@@ -1451,7 +1455,7 @@ RETURN elementId(r) AS uuid
"""
SEARCH_PERCEPTUAL_BY_KEYWORD = """
CALL db.index.fulltext.queryNodes("perceptualFulltext", $q) YIELD node AS p, score
CALL db.index.fulltext.queryNodes("perceptualFulltext", $query) YIELD node AS p, score
WHERE p.end_user_id = $end_user_id
RETURN p.id AS id,
p.end_user_id AS end_user_id,

View File

@@ -186,6 +186,58 @@ async def save_dialog_and_statements_to_neo4j(
Returns:
bool: True if successful, False otherwise
"""
# TODO: this logic should move into the dedup/disambiguation stage
# Preprocessing: for the special entities ("用户", "AI助手"), reuse the IDs of existing Neo4j nodes
# so that each end_user_id has exactly one "用户" node and one "AI助手" node.
if entity_nodes:
_SPECIAL_NAMES = {"用户", "我", "user", "i", "ai助手", "助手", "ai assistant", "assistant"}
end_user_id = entity_nodes[0].end_user_id if entity_nodes else None
if end_user_id:
try:
# Look up existing special entities
cypher = """
MATCH (e:ExtractedEntity)
WHERE e.end_user_id = $end_user_id AND toLower(e.name) IN $names
RETURN e.id AS id, e.name AS name
"""
existing = await connector.execute_query(
cypher,
end_user_id=end_user_id,
names=list(_SPECIAL_NAMES),
)
# Build a name(lower) -> existing_id mapping
existing_id_map = {}
for record in (existing or []):
name_lower = (record.get("name") or "").strip().lower()
if name_lower and record.get("id"):
existing_id_map[name_lower] = record["id"]
if existing_id_map:
# Replace new-entity IDs with the existing IDs, and update every edge that references them
for ent in entity_nodes:
name_lower = (ent.name or "").strip().lower()
if name_lower in existing_id_map:
old_id = ent.id
new_id = existing_id_map[name_lower]
if old_id != new_id:
ent.id = new_id
# Update references in statement_entity_edges
for edge in statement_entity_edges:
if edge.target == old_id:
edge.target = new_id
if edge.source == old_id:
edge.source = new_id
# Update references in entity_edges
for edge in entity_edges:
if edge.source == old_id:
edge.source = new_id
if edge.target == old_id:
edge.target = new_id
logger.info(
f"特殊实体 '{ent.name}' ID 复用: {old_id[:8]}... → {new_id[:8]}..."
)
except Exception as e:
logger.warning(f"特殊实体 ID 复用查询失败(不影响写入): {e}")
# Define a transaction function so that all writes happen in a single transaction
async def _save_all_in_transaction(tx):

View File

@@ -2,6 +2,7 @@ import asyncio
import logging
from typing import Any, Dict, List, Optional
from app.core.memory.utils.data.text_utils import escape_lucene_query
from app.repositories.neo4j.cypher_queries import (
CHUNK_EMBEDDING_SEARCH,
COMMUNITY_EMBEDDING_SEARCH,
@@ -87,7 +88,7 @@ async def _update_activation_values_batch(
unique_node_ids.append(node_id)
if not unique_node_ids:
logger.warning(f"批量更新激活值没有有效的节点ID")
logger.warning("批量更新激活值没有有效的节点ID")
return nodes
# Log dedup info (only for nodes with valid IDs)
@@ -223,7 +224,7 @@ async def _update_search_results_activation(
async def search_graph(
connector: Neo4jConnector,
q: str,
query: str,
end_user_id: Optional[str] = None,
limit: int = 50,
include: List[str] = None,
@@ -234,14 +235,14 @@ async def search_graph(
OPTIMIZED: Runs all queries in parallel using asyncio.gather()
INTEGRATED: Updates activation values for knowledge nodes before returning results
- Statements: matches s.statement CONTAINS q
- Entities: matches e.name CONTAINS q
- Chunks: matches s.content CONTAINS q (from Statement nodes)
- Summaries: matches ms.content CONTAINS q
- Statements: matches s.statement CONTAINS query
- Entities: matches e.name CONTAINS query
- Chunks: matches s.content CONTAINS query (from Statement nodes)
- Summaries: matches ms.content CONTAINS query
Args:
connector: Neo4j connector
q: Query text
query: Query text for full-text search
end_user_id: Optional group filter
limit: Max results per category
include: List of categories to search (default: all)
@@ -252,6 +253,9 @@ async def search_graph(
if include is None:
include = ["statements", "chunks", "entities", "summaries"]
# Escape Lucene special characters to prevent query parse errors
escaped_query = escape_lucene_query(query)
# Prepare tasks for parallel execution
tasks = []
task_keys = []
@@ -260,7 +264,7 @@ async def search_graph(
tasks.append(connector.execute_query(
SEARCH_STATEMENTS_BY_KEYWORD,
json_format=True,
q=q,
query=escaped_query,
end_user_id=end_user_id,
limit=limit,
))
@@ -270,7 +274,7 @@ async def search_graph(
tasks.append(connector.execute_query(
SEARCH_ENTITIES_BY_NAME_OR_ALIAS,
json_format=True,
q=q,
query=escaped_query,
end_user_id=end_user_id,
limit=limit,
))
@@ -280,7 +284,7 @@ async def search_graph(
tasks.append(connector.execute_query(
SEARCH_CHUNKS_BY_CONTENT,
json_format=True,
q=q,
query=escaped_query,
end_user_id=end_user_id,
limit=limit,
))
@@ -290,7 +294,7 @@ async def search_graph(
tasks.append(connector.execute_query(
SEARCH_MEMORY_SUMMARIES_BY_KEYWORD,
json_format=True,
q=q,
query=escaped_query,
end_user_id=end_user_id,
limit=limit,
))
@@ -300,7 +304,7 @@ async def search_graph(
tasks.append(connector.execute_query(
SEARCH_COMMUNITIES_BY_KEYWORD,
json_format=True,
q=q,
query=escaped_query,
end_user_id=end_user_id,
limit=limit,
))
@@ -482,7 +486,7 @@ async def search_graph_by_embedding(
update_time = time.time() - update_start
logger.info(f"[PERF] Activation value updates took: {update_time:.4f}s")
else:
logger.info(f"[PERF] Skipping activation updates (only summaries)")
logger.info("[PERF] Skipping activation updates (only summaries)")
return results
@@ -520,7 +524,7 @@ async def get_dedup_candidates_for_entities( # 适配新版查询:使用全
# Full-text index lookup by name (CONTAINS semantics)
rows = await connector.execute_query(
SEARCH_ENTITIES_BY_NAME,
q=name,
query=escape_lucene_query(name),
end_user_id=end_user_id,
limit=100,
)
@@ -544,7 +548,7 @@ async def get_dedup_candidates_for_entities( # 适配新版查询:使用全
try:
rows = await connector.execute_query(
SEARCH_ENTITIES_BY_NAME,
q=name.lower(),
query=escape_lucene_query(name.lower()),
end_user_id=end_user_id,
limit=100,
)
@@ -593,11 +597,12 @@ async def search_graph_by_keyword_temporal(
- Returns up to 'limit' statements
"""
if not query_text:
logger.warning(f"query_text不能为空")
logger.warning("query_text不能为空")
return {"statements": []}
escaped_query = escape_lucene_query(query_text)
statements = await connector.execute_query(
SEARCH_STATEMENTS_BY_KEYWORD_TEMPORAL,
q=query_text,
query=escaped_query,
end_user_id=end_user_id,
start_date=start_date,
end_date=end_date,
@@ -671,7 +676,7 @@ async def search_graph_by_dialog_id(
- Returns up to 'limit' dialogues
"""
if not dialog_id:
logger.warning(f"dialog_id不能为空")
logger.warning("dialog_id不能为空")
return {"dialogues": []}
dialogues = await connector.execute_query(
@@ -690,7 +695,7 @@ async def search_graph_by_chunk_id(
limit: int = 1,
) -> Dict[str, List[Dict[str, Any]]]:
if not chunk_id:
logger.warning(f"chunk_id不能为空")
logger.warning("chunk_id不能为空")
return {"chunks": []}
chunks = await connector.execute_query(
SEARCH_CHUNK_BY_CHUNK_ID,
@@ -968,7 +973,7 @@ async def search_graph_l_valid_at(
async def search_perceptual(
connector: Neo4jConnector,
q: str,
query: str,
end_user_id: Optional[str] = None,
limit: int = 10,
) -> Dict[str, List[Dict[str, Any]]]:
@@ -979,7 +984,7 @@ async def search_perceptual(
Args:
connector: Neo4j connector
q: Query text
query: Query text for full-text search
end_user_id: Optional user filter
limit: Max results
@@ -989,7 +994,7 @@ async def search_perceptual(
try:
perceptuals = await connector.execute_query(
SEARCH_PERCEPTUAL_BY_KEYWORD,
q=q,
query=escape_lucene_query(query),
end_user_id=end_user_id,
limit=limit,
)
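
The motivation for escaping, sketched against Lucene's special-character list. The helper's actual implementation is not shown in this diff; the version below is one plausible shape, for illustration only:

import re

_LUCENE_SPECIALS = re.compile(r'([+\-!(){}\[\]^"~*?:\\/]|&&|\|\|)')

def escape_lucene_query(text: str) -> str:
    """Backslash-escape characters Lucene's query parser treats as syntax."""
    return _LUCENE_SPECIALS.sub(r'\\\1', text)

# Unescaped, a query like this raises a parse error inside
# db.index.fulltext.queryNodes; escaped, it is matched literally.
print(escape_lucene_query('C++ (beta) AND/OR "quotes"'))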

View File

@@ -77,11 +77,11 @@ class Neo4jConnector:
"""
await self.driver.close()
async def execute_query(self, query: str, json_format=False, **kwargs: Any) -> List[Dict[str, Any]]:
async def execute_query(self, cypher: str, json_format=False, **kwargs: Any) -> List[Dict[str, Any]]:
"""执行Cypher查询
Args:
query: Cypher查询语句
cypher: Cypher查询语句
json_format: json格式化
**kwargs: 查询参数将作为参数传递给Cypher查询
@@ -92,7 +92,7 @@ class Neo4jConnector:
"""
result = await self.driver.execute_query(
query,
cypher,
database="neo4j",
**kwargs
)

View File

@@ -161,6 +161,17 @@ class BuiltinToolRepository:
BuiltinToolConfig.id == tool_id
).first()
@staticmethod
def get_existing_tool_classes(db: Session, tenant_id: uuid.UUID) -> set:
"""获取该租户已有的内置工具 tool_class 集合"""
rows = db.query(BuiltinToolConfig.tool_class).join(
ToolConfig, BuiltinToolConfig.id == ToolConfig.id
).filter(
ToolConfig.tenant_id == tenant_id,
ToolConfig.tool_type == ToolType.BUILTIN.value
).all()
return {row[0] for row in rows}
class CustomToolRepository:
"""自定义工具仓储类"""

View File

@@ -23,7 +23,7 @@ class UserRepository:
db_logger.debug(f"根据 ID 查询用户user_id={user_id}")
try:
user = self.db.query(User).options(joinedload(User.tenant)).filter(User.id == user_id).first()
user = self.db.query(User).options(joinedload(User.tenant)).filter(User.id == user_id, User.is_active.is_(True)).first()
if user:
# Check tenant status; return None when the tenant is disabled
if user.tenant and not user.tenant.is_active:
@@ -297,6 +297,10 @@ def get_user_by_id(db: Session, user_id: uuid.UUID) -> Optional[User]:
"""根据ID获取用户"""
return UserRepository(db).get_user_by_id(user_id)
def get_user_by_id_regardless_active(db: Session, user_id: uuid.UUID) -> Optional[User]:
"""根据ID获取用户不过滤 is_active用于启用/禁用场景)"""
return db.query(User).filter(User.id == user_id).first()
def get_user_by_email(db: Session, email: str) -> Optional[User]:
"""根据邮箱获取用户"""
return UserRepository(db).get_user_by_email(email)

View File

@@ -616,6 +616,7 @@ class AppChatRequest(BaseModel):
stream: bool = Field(default=False, description="是否流式返回")
thinking: bool = Field(default=False, description="是否启用深度思考需Agent配置支持")
files: List[FileInput] = Field(default_factory=list, description="附件列表(支持多文件)")
version: Optional[uuid.UUID] = Field(default=None, description="指定发布版本ID不传则使用当前生效版本")
class DraftRunRequest(BaseModel):

View File

@@ -165,7 +165,14 @@ class AppChatService:
multimodal_service = MultimodalService(self.db, model_info)
processed_files = await multimodal_service.process_files(files)
logger.info(f"处理了 {len(processed_files)} 个文件")
# Inject context into tools that need runtime context
for t in tools:
if hasattr(t, 'tool_instance') and hasattr(t.tool_instance, 'set_runtime_context'):
t.tool_instance.set_runtime_context(
user_id=user_id or "anonymous",
conversation_id=str(conversation_id) if conversation_id else None,
uploaded_files=processed_files or []
)
# Call the Agent (multimodal supported)
result = await agent.chat(
message=message,
@@ -413,6 +420,15 @@ class AppChatService:
processed_files = await multimodal_service.process_files(files)
logger.info(f"处理了 {len(processed_files)} 个文件")
# Inject context into tools that need runtime context
for t in tools:
if hasattr(t, 'tool_instance') and hasattr(t.tool_instance, 'set_runtime_context'):
t.tool_instance.set_runtime_context(
user_id=user_id or "anonymous",
conversation_id=str(conversation_id) if conversation_id else None,
uploaded_files=processed_files or []
)
# Stream from the Agent (multimodal supported) while starting TTS in parallel
full_content = ""
full_reasoning = ""

View File

@@ -73,15 +73,14 @@ class AppDslService:
AppType.MULTI_AGENT: "multi_agent_config",
AppType.WORKFLOW: "workflow"
}.get(app.type, "config")
config_data = self._enrich_release_config(app.type, release.config or {})
config_data = self._enrich_release_config(app.type, release.config or {}, release.default_model_config_id)
dsl = {**meta, "app": app_meta, config_key: config_data}
return yaml.dump(dsl, default_flow_style=False, allow_unicode=True), f"{release.name}_v{release.version_name}.yaml"
def _enrich_release_config(self, app_type: str, cfg: dict) -> dict:
def _enrich_release_config(self, app_type: str, cfg: dict, default_model_config_id=None) -> dict:
if app_type == AppType.AGENT:
enriched = {**cfg}
if "default_model_config_id" in cfg:
enriched["default_model_config_ref"] = self._model_ref(cfg["default_model_config_id"])
enriched["default_model_config_ref"] = self._model_ref(default_model_config_id)
if "knowledge_retrieval" in cfg:
enriched["knowledge_retrieval"] = self._enrich_knowledge_retrieval(cfg["knowledge_retrieval"])
if "tools" in cfg:
@@ -91,8 +90,7 @@ class AppDslService:
return enriched
if app_type == AppType.MULTI_AGENT:
enriched = {**cfg}
if "default_model_config_id" in cfg:
enriched["default_model_config_ref"] = self._model_ref(cfg["default_model_config_id"])
enriched["default_model_config_ref"] = self._model_ref(default_model_config_id)
if "master_agent_id" in cfg:
enriched["master_agent_ref"] = self._release_ref(cfg["master_agent_id"])
if "sub_agents" in cfg:

View File

@@ -411,6 +411,7 @@ class AppService:
edges=[edge.model_dump() for edge in data.edges] if data.edges else [],
variables=[var.model_dump() for var in data.variables] if data.variables else [],
execution_config=data.execution_config.model_dump() if data.execution_config else {},
features=data.features if data.features else {},
triggers=[trigger.model_dump() for trigger in data.triggers] if data.triggers else [],
is_active=True,
created_at=now,
@@ -619,6 +620,28 @@ class AppService:
self._validate_app_accessible(app, workspace_id)
return app
def get_release_by_id(self, app_id: uuid.UUID, release_id: uuid.UUID) -> AppRelease:
"""按发布版本ID获取发布快照
Args:
app_id: 应用ID
release_id: 发布版本ID
Returns:
AppRelease: 发布快照
Raises:
BusinessException: 版本不存在或已下线
"""
from app.repositories.app_repository import get_release_by_id
release = get_release_by_id(self.db, app_id, release_id)
if not release:
raise BusinessException(
f"版本 {release_id} 不存在或已下线",
BizCode.RELEASE_NOT_FOUND,
)
return release
def create_app(
self,
*,

View File

@@ -640,7 +640,14 @@ class AgentRunService:
multimodal_service = MultimodalService(self.db, model_info)
processed_files = await multimodal_service.process_files(files)
logger.info(f"处理了 {len(processed_files)} 个文件provider={provider}")
# Inject context into tools that need runtime context
for t in tools:
if hasattr(t, 'tool_instance') and hasattr(t.tool_instance, 'set_runtime_context'):
t.tool_instance.set_runtime_context(
user_id=user_id or "anonymous",
conversation_id=str(conversation_id) if conversation_id else None,
uploaded_files=processed_files or []
)
# 7. Knowledge base retrieval
context = None
@@ -890,7 +897,14 @@ class AgentRunService:
multimodal_service = MultimodalService(self.db, model_info)
processed_files = await multimodal_service.process_files(files)
logger.info(f"处理了 {len(processed_files)} 个文件provider={provider}")
# Inject context into tools that need runtime context
for t in tools:
if hasattr(t, 'tool_instance') and hasattr(t.tool_instance, 'set_runtime_context'):
t.tool_instance.set_runtime_context(
user_id=user_id or "anonymous",
conversation_id=str(conversation_id) if conversation_id else None,
uploaded_files=processed_files or []
)
# 7. Knowledge base retrieval
context = None

View File

@@ -679,9 +679,9 @@ class EmotionAnalyticsService:
# Query the user's entities and tags
query = """
MATCH (e:Entity)
MATCH (e:ExtractedEntity)
WHERE e.end_user_id = $end_user_id
RETURN e.name as name, e.type as type
RETURN e.name as name, e.entity_type as type
ORDER BY e.created_at DESC
LIMIT 20
"""

View File

@@ -34,6 +34,7 @@ from app.schemas.implicit_memory_schema import (
UserMemorySummary,
)
from app.schemas.memory_config_schema import MemoryConfig
from app.services.memory_base_service import MIN_MEMORY_SUMMARY_COUNT
from sqlalchemy.orm import Session
logger = logging.getLogger(__name__)
@@ -379,12 +380,59 @@ class ImplicitMemoryService:
raise
def _build_empty_profile(self) -> dict:
"""构建 MemorySummary 不足时返回的固定空白画像数据"""
now_ms = int(datetime.utcnow().timestamp() * 1000)
insufficient = "Insufficient data for analysis"
def _empty_dimension(name: str) -> dict:
return {
"evidence": [insufficient],
"reasoning": f"No clear evidence found for {name} dimension",
"percentage": 0.0,
"dimension_name": name,
"confidence_level": 20,
}
def _empty_category(name: str) -> dict:
return {
"evidence": [insufficient],
"percentage": 25.0,
"category_name": name,
"trending_direction": None,
}
return {
"habits": [],
"portrait": {
"aesthetic": _empty_dimension("aesthetic"),
"creativity": _empty_dimension("creativity"),
"literature": _empty_dimension("literature"),
"technology": _empty_dimension("technology"),
"historical_trends": None,
"analysis_timestamp": now_ms,
"total_summaries_analyzed": 0,
},
"preferences": [],
"interest_areas": {
"art": _empty_category("art"),
"tech": _empty_category("tech"),
"music": _empty_category("music"),
"lifestyle": _empty_category("lifestyle"),
"analysis_timestamp": now_ms,
"total_summaries_analyzed": 0,
},
}
async def generate_complete_profile(
self,
user_id: str
) -> dict:
"""生成完整的用户画像包含所有4个模块
需要该用户的 MemorySummary 节点数量 >= 5 才会真正调用 LLM 生成画像,
否则返回固定的空白画像数据。
Args:
user_id: 用户ID
@@ -394,6 +442,16 @@ class ImplicitMemoryService:
logger.info(f"生成完整用户画像: user={user_id}")
try:
# Precondition: count the user's valid MemorySummary nodes (excluding orphans)
from app.services.memory_base_service import MemoryBaseService
base_service = MemoryBaseService()
memory_summary_count = await base_service.get_valid_memory_summary_count(user_id)
logger.info(f"用户 MemorySummary 节点数量: {memory_summary_count} (user={user_id})")
if memory_summary_count < MIN_MEMORY_SUMMARY_COUNT:
logger.info(f"MemorySummary 数量不足 {MIN_MEMORY_SUMMARY_COUNT}(当前 {memory_summary_count}),返回空白画像: user={user_id}")
return self._build_empty_profile()
# Run the 4 analysis methods in parallel
preferences, portrait, interest_areas, habits = await asyncio.gather(
self.get_preference_tags(user_id=user_id),

View File

@@ -265,12 +265,50 @@ async def Translation_English(modid, text, fields=None):
# Other types (numbers, booleans, None, etc.) are returned unchanged
else:
return text
# Minimum number of MemorySummary nodes required to generate an implicit-memory profile
MIN_MEMORY_SUMMARY_COUNT = 5
class MemoryBaseService:
"""记忆服务基类,提供共享的辅助方法"""
def __init__(self):
self.neo4j_connector = Neo4jConnector()
async def get_valid_memory_summary_count(
self,
end_user_id: str
) -> int:
"""获取用户有效的 MemorySummary 节点数量(排除孤立节点)。
只统计存在 DERIVED_FROM_STATEMENT 关系的 MemorySummary 节点。
Args:
end_user_id: 终端用户ID
Returns:
有效 MemorySummary 节点数量
"""
try:
query = """
MATCH (n:MemorySummary)-[:DERIVED_FROM_STATEMENT]->(:Statement)
WHERE n.end_user_id = $end_user_id
RETURN count(DISTINCT n) as count
"""
result = await self.neo4j_connector.execute_query(
query, end_user_id=end_user_id
)
count = result[0]["count"] if result and len(result) > 0 else 0
logger.debug(
f"有效 MemorySummary 节点数量: {count} (end_user_id={end_user_id})"
)
return count
except Exception as e:
logger.error(
f"获取有效 MemorySummary 数量失败: {str(e)}", exc_info=True
)
return 0
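
A small driver for the count helper above (a sketch; it assumes Neo4j connection settings are already configured for Neo4jConnector, and the user ID is a placeholder):

import asyncio

async def demo_count():
    service = MemoryBaseService()
    count = await service.get_valid_memory_summary_count("end-user-123")
    enough = count >= MIN_MEMORY_SUMMARY_COUNT
    # Profile generation only happens once the user crosses the threshold.
    print(f"{count} valid summaries; profile generation {'enabled' if enough else 'skipped'}")

asyncio.run(demo_count())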
@staticmethod
def parse_timestamp(timestamp_value) -> Optional[int]:
"""

View File

@@ -803,7 +803,6 @@ def get_rag_content(
"page": {
"page": page,
"pagesize": pagesize,
"total": 0,
"hasnext": False,
},
"items": []
@@ -897,13 +896,12 @@ def get_rag_content(
"page": {
"page": page,
"pagesize": pagesize,
"total": global_total,
"hasnext": offset_end < global_total,
},
"items": conversations
}
business_logger.info(f"成功获取RAG内容: total={global_total}, page={page}, 返回={len(conversations)} 条对话")
business_logger.info(f"成功获取RAG内容: page={page}, 返回={len(conversations)} 条对话")
return result
except Exception as e:

View File

@@ -227,10 +227,20 @@ class PromptOptimizerService:
content = getattr(chunk, "content", chunk)
if not content:
continue
buffer += content
if isinstance(content, str):
buffer += content
elif isinstance(content, list):
for part in content:
buffer += part["text"]
else:
logger.error(f"Unsupported content type - {content}")
raise Exception("Unsupported content type")
cache = buffer[:-20]
last_idx = 19
while cache and cache[-1] == '\\' and last_idx > 0:
cache = buffer[:-last_idx]
last_idx -= 1
# 尝试找到 "prompt": " 开始位置
if prompt_finished:
continue
@@ -272,7 +282,7 @@ class PromptOptimizerService:
def parser_prompt_variables(prompt: str):
try:
pattern = r'\{\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}\}'
matches = re.findall(pattern, prompt)
matches = re.findall(pattern, str(prompt))
variables = list(set(matches))
return variables
except Exception as e:
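
The backoff above keeps the 20-character hold-back from cutting a streamed JSON string in the middle of an escape sequence: if the provisional cut ends on a backslash, the cut point slides right until it no longer does. A standalone illustration of the same loop:

buffer = "x" * 10 + "\\" + "y" * 20  # the provisional cut would end on a backslash
cache = buffer[:-20]
last_idx = 19
while cache and cache[-1] == "\\" and last_idx > 0:
    cache = buffer[:-last_idx]  # slide the cut one character to the right
    last_idx -= 1
assert not cache.endswith("\\")  # now safe to scan for an unescaped closing quote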

View File

@@ -34,7 +34,8 @@ BUILTIN_TOOLS = {
"JsonTool": "app.core.tools.builtin.json_tool",
"BaiduSearchTool": "app.core.tools.builtin.baidu_search_tool",
"MinerUTool": "app.core.tools.builtin.mineru_tool",
"TextInTool": "app.core.tools.builtin.textin_tool"
"TextInTool": "app.core.tools.builtin.textin_tool",
"OpenClawTool": "app.core.tools.builtin.openclaw_tool",
}
@@ -340,18 +341,18 @@ class ToolService:
return {"success": False, "message": f"测试失败: {str(e)}"}
def ensure_builtin_tools_initialized(self, tenant_id: uuid.UUID):
"""确保内置工具已初始化"""
existing = self.tool_repo.exists_builtin_for_tenant(self.db, tenant_id)
if existing:
"""确保内置工具已初始化(支持增量补充新工具)"""
builtin_config = self._load_builtin_config()
if not builtin_config:
return
# Load builtin tool definitions from the config file
builtin_config = self._load_builtin_config()
existing_classes = self.builtin_repo.get_existing_tool_classes(self.db, tenant_id)
added = False
for tool_key, tool_info in builtin_config.items():
if tool_info['tool_class'] in existing_classes:
continue
try:
# Create the tool config
initial_status = self._determine_initial_status(tool_info)
tool_config = ToolConfig(
name=tool_info['name'],
@@ -367,7 +368,6 @@ class ToolService:
self.db.add(tool_config)
self.db.flush()
# Create the builtin tool config
builtin_config_obj = BuiltinToolConfig(
id=tool_config.id,
tool_class=tool_info['tool_class'],
@@ -375,12 +375,14 @@ class ToolService:
requires_config=tool_info.get('requires_config', False)
)
self.db.add(builtin_config_obj)
added = True
except Exception as e:
logger.error(f"初始化内置工具失败: {tool_key}, {e}")
self.db.commit()
logger.info(f"租户 {tenant_id} 内置工具初始化完成")
if added:
self.db.commit()
logger.info(f"租户 {tenant_id} 内置工具增量初始化完成")
async def get_tool_methods(self, tool_id: str, tenant_id: uuid.UUID) -> Optional[List[Dict[str, Any]]]:
"""获取工具的所有方法
@@ -458,6 +460,9 @@ class ToolService:
# For json_tool, return parameters relevant to the operation type
elif hasattr(tool_instance, 'name') and tool_instance.name == 'json_tool':
return self._get_json_tool_params(operation)
# For openclaw_tool, return parameters with operation-specific descriptions
elif hasattr(tool_instance, 'name') and tool_instance.name == 'openclaw_tool':
return self._get_openclaw_tool_params(operation)
# Default handling for other tools: return all parameters except "operation"
return [{
@@ -574,6 +579,29 @@ class ToolService:
"default": "Asia/Shanghai"
}
]
elif operation == "datetime_to_timestamp":
return [
{
"name": "input_value",
"type": "string",
"description": "输入值时间字符串2026-04-07 10:30:25",
"required": True
},
{
"name": "input_format",
"type": "string",
"description": "输入时间格式(如:%Y-%m-%d %H:%M:%S",
"required": False,
"default": "%Y-%m-%d %H:%M:%S"
},
{
"name": "from_timezone",
"type": "string",
"description": "源时区UTC, Asia/Shanghai",
"required": False,
"default": "Asia/Shanghai"
}
]
else:
# By default, return all parameters except "operation"
return [
@@ -687,6 +715,65 @@ class ToolService:
return base_params
@staticmethod
def _get_openclaw_tool_params(operation: str) -> List[Dict[str, Any]]:
"""获取 openclaw_tool 特定操作的参数"""
if operation == "print_task":
return [
{
"name": "message",
"type": "string",
"description": "发送给 OpenClaw 的打印任务描述,将用户的原始消息原封不动地传递给 OpenClaw禁止改写、补充或润色用户的原文",
"required": True
},
{
"name": "image_url",
"type": "string",
"description": "可选附带的设计图片或参考图OpenClaw 可据此生成 3D 模型",
"required": False
}
]
elif operation == "device_query":
return [
{
"name": "message",
"type": "string",
"description": "发送给 OpenClaw 的设备查询指令",
"required": True
}
]
elif operation == "image_understand":
return [
{
"name": "message",
"type": "string",
"description": "发送给 OpenClaw 的图片理解任务,应描述需要对图片做什么(如描述内容、提取文字、分析信息)",
"required": True
},
{
"name": "image_url",
"type": "string",
"description": "要分析的图片 URL 或 base64 data URI",
"required": False
}
]
else:
# "general" and all other operations
return [
{
"name": "message",
"type": "string",
"description": "发送给 OpenClaw Agent 的任务描述,应包含完整的任务需求",
"required": True
},
{
"name": "image_url",
"type": "string",
"description": "可选,附带的图片 URL 或 base64 data URI",
"required": False
}
]
async def _get_custom_tool_methods(self, config: ToolConfig) -> List[Dict[str, Any]]:
"""获取自定义工具的方法"""
custom_config = self.custom_repo.find_by_tool_id(self.db, config.id)

View File

@@ -14,6 +14,7 @@ from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
from app.core.logging_config import get_logger
from app.core.memory.storage_services.extraction_engine.deduplication.deduped_and_disamb import _USER_PLACEHOLDER_NAMES
from app.core.memory.utils.llm.llm_utils import MemoryClientFactory
from app.db import get_db_context
from app.repositories.conversation_repository import ConversationRepository
@@ -21,7 +22,7 @@ from app.repositories.end_user_repository import EndUserRepository
from app.repositories.neo4j.cypher_queries import Graph_Node_query
from app.repositories.neo4j.neo4j_connector import Neo4jConnector
from app.schemas.memory_episodic_schema import EmotionSubject, EmotionType, type_mapping
from app.services.memory_base_service import MemoryBaseService
from app.services.memory_base_service import MemoryBaseService, MIN_MEMORY_SUMMARY_COUNT
from app.services.memory_config_service import MemoryConfigService
from app.services.memory_perceptual_service import MemoryPerceptualService
from app.services.memory_short_service import ShortService
@@ -473,7 +474,7 @@ class UserMemoryService:
allowed_fields = {'other_name', 'aliases', 'meta_data'}
# Blacklist of user placeholder names; not allowed as other_name or inside aliases
_user_placeholder_names = {'用户', '我', 'User', 'I'}
_user_placeholder_names = _USER_PLACEHOLDER_NAMES
# Filter other_name: setting it to a placeholder name is not allowed
if 'other_name' in update_data and update_data['other_name'] and update_data['other_name'].strip() in _user_placeholder_names:
@@ -1500,7 +1501,7 @@ async def analytics_memory_types(
2. Working memory (WORKING_MEMORY) = conversation count (via ConversationRepository.get_conversation_by_user_id)
3. Short-term memory (SHORT_TERM_MEMORY) = number of Q&A pairs returned by the /short_term endpoint
4. Explicit memory (EXPLICIT_MEMORY) = episodic + semantic memory (via MemoryBaseService.get_explicit_memory_count)
5. Implicit memory (IMPLICIT_MEMORY) = one third of the Statement node count
5. Implicit memory (IMPLICIT_MEMORY) = MemorySummary node count (shown only when >= MIN_MEMORY_SUMMARY_COUNT, otherwise 0)
6. Emotional memory (EMOTIONAL_MEMORY) = total emotion-tag count (via MemoryBaseService.get_emotional_memory_count)
7. Episodic memory (EPISODIC_MEMORY) = memory_summary (via MemoryBaseService.get_episodic_memory_count)
8. Forgotten memory (FORGET_MEMORY) = nodes whose activation falls below the threshold (via MemoryBaseService.get_forget_memory_count)
@@ -1557,23 +1558,15 @@ async def analytics_memory_types(
logger.warning(f"获取会话数量失败工作记忆数量设为0: {str(e)}")
work_count = 0
# Implicit memory count (one third of the Statement node count)
# Implicit memory count (linked MemorySummary nodes; counted only when >= MIN_MEMORY_SUMMARY_COUNT)
implicit_count = 0
if end_user_id:
try:
# Query the number of Statement nodes
query = """
MATCH (n:Statement)
WHERE n.end_user_id = $end_user_id
RETURN count(n) as count
"""
result = await _neo4j_connector.execute_query(query, end_user_id=end_user_id)
statement_count = result[0]["count"] if result and len(result) > 0 else 0
# Use one third as the implicit memory count
implicit_count = round(statement_count / 3)
logger.debug(f"隐性记忆数量Statement数量的1/3: {implicit_count} (Statement总数={statement_count}, end_user_id={end_user_id})")
memory_summary_count = await base_service.get_valid_memory_summary_count(end_user_id)
implicit_count = memory_summary_count if memory_summary_count >= MIN_MEMORY_SUMMARY_COUNT else 0
logger.debug(f"隐性记忆数量有效MemorySummary节点数: {implicit_count} (有效MemorySummary总数={memory_summary_count}, end_user_id={end_user_id})")
except Exception as e:
logger.warning(f"获取Statement数量失败隐性记忆数量设为0: {str(e)}")
logger.warning(f"获取MemorySummary数量失败隐性记忆数量设为0: {str(e)}")
implicit_count = 0
# Original statistics based on behavioral habits (commented out)
@@ -1639,7 +1632,7 @@ async def analytics_memory_types(
"WORKING_MEMORY": work_count, # 工作记忆(基于会话数量)
"SHORT_TERM_MEMORY": short_term_count, # 短期记忆(基于问答对数量)
"EXPLICIT_MEMORY": explicit_count, # 显性记忆(情景记忆 + 语义记忆)
"IMPLICIT_MEMORY": implicit_count, # 隐性记忆(Statement数量的1/3
"IMPLICIT_MEMORY": implicit_count, # 隐性记忆(MemorySummary节点数需>=MIN_MEMORY_SUMMARY_COUNT
"EMOTIONAL_MEMORY": emotion_count, # 情绪记忆(使用情绪标签统计)
"EPISODIC_MEMORY": episodic_count, # 情景记忆
"FORGET_MEMORY": forget_count # 遗忘记忆(激活值低于阈值)

View File

@@ -285,7 +285,7 @@ def activate_user(db: Session, user_id_to_activate: uuid.UUID, current_user: Use
try:
# Look up the user
business_logger.debug(f"查找待激活用户: {user_id_to_activate}")
db_user = user_repository.get_user_by_id(db, user_id=user_id_to_activate)
db_user = user_repository.get_user_by_id_regardless_active(db, user_id=user_id_to_activate)
if not db_user:
business_logger.warning(f"用户不存在: {user_id_to_activate}")
raise BusinessException("用户不存在", code=BizCode.USER_NOT_FOUND)

View File

@@ -69,6 +69,7 @@ class WorkflowImportService:
edges=workflow_config.edges,
nodes=workflow_config.nodes,
variables=workflow_config.variables,
features=workflow_config.features,
warnings=workflow_config.warnings,
errors=workflow_config.errors
)
@@ -95,7 +96,8 @@ class WorkflowImportService:
workflow_config=WorkflowConfigCreate(
nodes=config["nodes"],
edges=config["edges"],
variables=config["variables"]
variables=config["variables"],
features=config.get("features", {})
)
)
)

View File

@@ -1,4 +1,5 @@
import asyncio
import json
import os
import re
import shutil
@@ -1001,7 +1002,7 @@ def sync_knowledge_for_kb(kb_id: uuid.UUID):
except Exception as e:
print(f"\n\nError during fetch feishu: {e}")
case _: # General
print(f"General: No synchronization needed\n")
print("General: No synchronization needed\n")
result = f"sync knowledge '{db_knowledge.name}' processed successfully."
return result
@@ -1510,6 +1511,7 @@ def write_all_workspaces_memory_task(self) -> Dict[str, Any]:
"status": "SUCCESS",
"total_num": total_num,
"end_user_count": len(end_users),
"end_user_details": end_user_details,
"memory_increment_id": str(memory_increment.id),
"created_at": memory_increment.created_at.isoformat(),
})
@@ -2602,35 +2604,34 @@ def init_interest_distribution_for_users(self, end_user_ids: List[str]) -> Dict[
service = MemoryAgentService()
with get_db_context() as db:
for end_user_id in end_user_ids:
# Existence check: skip when the cache already has data
cached = await InterestMemoryCache.get_interest_distribution(
for end_user_id in end_user_ids:
# Existence check: skip when the cache already has data
cached = await InterestMemoryCache.get_interest_distribution(
end_user_id=end_user_id,
language=language,
)
if cached is not None:
skipped += 1
continue
logger.info(f"用户 {end_user_id} 无兴趣分布缓存,开始生成")
try:
result = await service.get_interest_distribution_by_user(
end_user_id=end_user_id,
limit=5,
language=language,
)
if cached is not None:
skipped += 1
continue
logger.info(f"用户 {end_user_id} 无兴趣分布缓存,开始生成")
try:
result = await service.get_interest_distribution_by_user(
end_user_id=end_user_id,
limit=5,
language=language,
)
await InterestMemoryCache.set_interest_distribution(
end_user_id=end_user_id,
language=language,
data=result,
expire=INTEREST_CACHE_EXPIRE,
)
initialized += 1
logger.info(f"用户 {end_user_id} 兴趣分布缓存生成成功")
except Exception as e:
failed += 1
logger.error(f"用户 {end_user_id} 兴趣分布缓存生成失败: {e}")
await InterestMemoryCache.set_interest_distribution(
end_user_id=end_user_id,
language=language,
data=result,
expire=INTEREST_CACHE_EXPIRE,
)
initialized += 1
logger.info(f"用户 {end_user_id} 兴趣分布缓存生成成功")
except Exception as e:
failed += 1
logger.error(f"用户 {end_user_id} 兴趣分布缓存生成失败: {e}")
logger.info(f"兴趣分布按需初始化完成: 初始化={initialized}, 跳过={skipped}, 失败={failed}")
return {
@@ -2914,4 +2915,270 @@ def init_community_clustering_for_users(self, end_user_ids: List[str], workspace
}
# ─── User Metadata Extraction Task ───────────────────────────────────────────
def _update_timestamps(existing: dict, new: dict, updated_at: dict, now: str, prefix: str = "") -> None:
"""对比新旧元数据,更新变更字段的 _updated_at 时间戳。"""
for key, new_val in new.items():
if key == "_updated_at":
continue
path = f"{prefix}.{key}" if prefix else key
old_val = existing.get(key)
if isinstance(new_val, dict) and isinstance(old_val, dict):
_update_timestamps(old_val, new_val, updated_at, now, prefix=path)
elif old_val != new_val:
updated_at[path] = now
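# Example (hypothetical values): with existing={"city": "Beijing"} and
# new={"city": "Shanghai", "job": "engineer"}, both "city" (changed) and
# "job" (newly added) receive the current timestamp in updated_at, while
# keys absent from new keep their previous timestamps.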
@celery_app.task(
bind=True,
name='app.tasks.extract_user_metadata',
ignore_result=False,
max_retries=0,
acks_late=True,
time_limit=300,
soft_time_limit=240,
)
def extract_user_metadata_task(
self,
end_user_id: str,
statements: List[str],
config_id: Optional[str] = None,
language: str = "zh",
) -> Dict[str, Any]:
"""异步提取用户元数据并写入数据库。
在去重消歧完成后由编排器触发,使用独立 LLM 调用提取元数据。
LLM 配置优先使用 config_id 对应的应用配置,失败时回退到工作空间默认配置。
Args:
end_user_id: 终端用户 ID
statements: 用户相关的 statement 文本列表
config_id: 应用配置 ID可选
language: 语言类型 ("zh" 中文, "en" 英文)
Returns:
包含任务执行结果的字典
"""
start_time = time.time()
logger.info(
f"[CELERY METADATA] Starting metadata extraction - end_user_id={end_user_id}, "
f"statements_count={len(statements)}, config_id={config_id}, language={language}"
)
async def _run() -> Dict[str, Any]:
from app.core.memory.storage_services.extraction_engine.knowledge_extraction.metadata_extractor import MetadataExtractor
from app.repositories.end_user_info_repository import EndUserInfoRepository
from app.repositories.end_user_repository import EndUserRepository
from app.services.memory_config_service import MemoryConfigService
# 1. Resolve the LLM config (app config → workspace config fallback) and create the LLM client
with get_db_context() as db:
end_user_uuid = uuid.UUID(end_user_id)
# Get workspace_id from the end_user
end_user = EndUserRepository(db).get_by_id(end_user_uuid)
if not end_user:
return {"status": "FAILURE", "error": f"End user not found: {end_user_id}"}
workspace_id = end_user.workspace_id
config_service = MemoryConfigService(db)
memory_config = config_service.get_config_with_fallback(
memory_config_id=uuid.UUID(config_id) if config_id else None,
workspace_id=workspace_id,
)
if not memory_config:
return {"status": "FAILURE", "error": "No LLM config available (app + workspace fallback failed)"}
# 2. Create the LLM client
from app.core.memory.utils.llm.llm_utils import MemoryClientFactory
factory = MemoryClientFactory(db)
if not memory_config.llm_id:
return {"status": "FAILURE", "error": "Memory config has no LLM model configured"}
llm_client = factory.get_llm_client(memory_config.llm_id)
# 2.5 Read existing metadata and aliases and pass them to the extractor as context
existing_metadata = None
existing_aliases = None
try:
info = EndUserInfoRepository(db).get_by_end_user_id(end_user_uuid)
if info:
if info.meta_data:
existing_metadata = info.meta_data
existing_aliases = info.aliases if info.aliases else []
logger.info(f"[CELERY METADATA] 已读取已有元数据和别名aliases={existing_aliases}")
except Exception as e:
logger.warning(f"[CELERY METADATA] 读取已有数据失败(继续无上下文提取): {e}")
# 3. Extract metadata and aliases (existing data passed in as context)
extractor = MetadataExtractor(llm_client=llm_client, language=language)
extract_result = await extractor.extract_metadata(
statements,
existing_metadata=existing_metadata,
existing_aliases=existing_aliases,
)
if not extract_result:
logger.info(f"[CELERY METADATA] No metadata extracted for end_user_id={end_user_id}")
return {"status": "SUCCESS", "result": "no_metadata_extracted"}
user_metadata, aliases_to_add, aliases_to_remove = extract_result
logger.info(f"[CELERY METADATA] LLM 别名新增: {aliases_to_add}, 移除: {aliases_to_remove}")
# 4. Clean the metadata, then overwrite metadata and apply alias additions/removals
def clean_metadata(raw: dict) -> dict:
"""递归移除空字符串、空列表、空字典。"""
result = {}
for k, v in raw.items():
if v == "" or v == []:
continue
if isinstance(v, dict):
cleaned = clean_metadata(v)
if cleaned:
result[k] = cleaned
else:
result[k] = v
return result
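# Example: clean_metadata({"a": "", "b": {"c": []}, "d": "x"}) -> {"d": "x"}
# ("a" is empty, and "b" collapses to an empty dict after cleaning).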
raw_dict = user_metadata.model_dump(exclude_none=True) if user_metadata else {}
logger.info(f"[CELERY METADATA] LLM 输出完整元数据: {json.dumps(raw_dict, ensure_ascii=False)}")
cleaned = clean_metadata(raw_dict) if raw_dict else {}
logger.info(f"[CELERY METADATA] 清洗后元数据: {json.dumps(cleaned, ensure_ascii=False)}")
from datetime import datetime as dt, timezone as tz
now = dt.now(tz.utc).isoformat()
# Filter placeholder names out of the aliases, then apply incremental add/remove
_PLACEHOLDER_NAMES = {"用户", "", "user", "i"}
def _filter_aliases(aliases_list):
seen = set()
result = []
for a in aliases_list:
a_stripped = a.strip()
if a_stripped and a_stripped.lower() not in _PLACEHOLDER_NAMES and a_stripped.lower() not in seen:
result.append(a_stripped)
seen.add(a_stripped.lower())
return result
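# Example: _filter_aliases([" 小熊 ", "用户", "Bear", "bear"]) -> ["小熊", "Bear"]
# (placeholders are dropped, whitespace is stripped, and the case-insensitive
# dedupe keeps the first spelling seen).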
filtered_add = _filter_aliases(aliases_to_add)
filtered_remove = _filter_aliases(aliases_to_remove)
remove_lower = {a.lower() for a in filtered_remove}
with get_db_context() as db:
end_user_uuid = uuid.UUID(end_user_id)
info = EndUserInfoRepository(db).get_by_end_user_id(end_user_uuid)
end_user = EndUserRepository(db).get_by_id(end_user_uuid)
if info:
# Overwrite the metadata
if cleaned:
existing_meta = info.meta_data if info.meta_data else {}
updated_at = dict(existing_meta.get("_updated_at", {}))
_update_timestamps(existing_meta, cleaned, updated_at, now)
final = dict(cleaned)
final["_updated_at"] = updated_at
info.meta_data = final
logger.info("[CELERY METADATA] 覆盖写入元数据")
# Incremental alias update: (existing - remove) + add
old_aliases = info.aliases if info.aliases else []
# Remove first
merged = [a for a in old_aliases if a.strip().lower() not in remove_lower]
# Then append (deduplicated)
existing_lower = {a.strip().lower() for a in merged}
for a in filtered_add:
if a.lower() not in existing_lower:
merged.append(a)
existing_lower.add(a.lower())
if merged != old_aliases:
info.aliases = merged
# other_name update logic
if merged and (
not info.other_name
or info.other_name.strip().lower() in _PLACEHOLDER_NAMES
or info.other_name.strip().lower() in remove_lower
):
info.other_name = merged[0]
if end_user and merged and (
not end_user.other_name
or end_user.other_name.strip().lower() in _PLACEHOLDER_NAMES
or end_user.other_name.strip().lower() in remove_lower
):
end_user.other_name = merged[0]
logger.info(
f"[CELERY METADATA] 别名增量更新: {old_aliases} - {filtered_remove} + {filtered_add}{merged}"
)
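# Example: old_aliases=["小熊", "Bear"], filtered_remove=["bear"],
# filtered_add=["大熊"] -> merged == ["小熊", "大熊"]; removal and dedupe
# compare case-insensitively, and other_name is promoted to merged[0] when
# it is empty, a placeholder, or was itself removed.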
else:
# No end_user_info record exists; create one
from app.models.end_user_info_model import EndUserInfo
initial_aliases = filtered_add # A new record only has additions, nothing to remove
first_alias = initial_aliases[0] if initial_aliases else ""
if first_alias or cleaned:
new_info = EndUserInfo(
end_user_id=end_user_uuid,
other_name=first_alias or "",
aliases=initial_aliases,
meta_data=cleaned if cleaned else None,
)
db.add(new_info)
if end_user and first_alias and (
not end_user.other_name or end_user.other_name.strip().lower() in _PLACEHOLDER_NAMES
):
end_user.other_name = first_alias
logger.info(f"[CELERY METADATA] 创建 end_user_info: other_name={first_alias}, aliases={initial_aliases}")
else:
return {"status": "SUCCESS", "result": "no_data_to_write"}
db.commit()
# Sync PgSQL aliases to the Neo4j user entity (PgSQL is the authoritative source)
final_aliases = info.aliases if info else initial_aliases
if final_aliases:
try:
from app.repositories.neo4j.neo4j_connector import Neo4jConnector
neo4j_connector = Neo4jConnector()
cypher = """
MATCH (e:ExtractedEntity)
WHERE e.end_user_id = $end_user_id AND e.name IN ['用户', '', 'User', 'I']
SET e.aliases = $aliases
"""
await neo4j_connector.execute_query(
cypher, end_user_id=end_user_id, aliases=final_aliases
)
await neo4j_connector.close()
logger.info(f"[CELERY METADATA] Neo4j 用户实体 aliases 已同步: {final_aliases}")
except Exception as neo4j_err:
logger.warning(f"[CELERY METADATA] Neo4j aliases 同步失败(不影响主流程): {neo4j_err}")
return {"status": "SUCCESS", "result": "metadata_and_aliases_written"}
loop = None
try:
loop = set_asyncio_event_loop()
result = loop.run_until_complete(_run())
elapsed = time.time() - start_time
result["elapsed_time"] = elapsed
result["task_id"] = self.request.id
logger.info(f"[CELERY METADATA] Task completed - elapsed={elapsed:.2f}s, result={result.get('result')}")
return result
except Exception as e:
elapsed = time.time() - start_time
logger.error(f"[CELERY METADATA] Task failed - elapsed={elapsed:.2f}s, error={e}", exc_info=True)
return {
"status": "FAILURE",
"error": str(e),
"elapsed_time": elapsed,
"task_id": self.request.id,
}
finally:
if loop:
_shutdown_loop_gracefully(loop)
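# Example (hypothetical invocation from the dedup/disambiguation orchestrator,
# using Celery's standard .delay API):
#   extract_user_metadata_task.delay(
#       end_user_id=str(end_user.id), statements=["我叫小熊"], language="zh")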
# unused task

View File

@@ -1,4 +1,36 @@
{
"v0.2.10": {
"introduction": {
"codeName": "炼剑",
"releaseDate": "2026-4-8",
"upgradePosition": "🐻 全面强化工作流引擎、引入 Agent 深度思考模式与多模态记忆读取,百炼成锋,剑指生产就绪",
"coreUpgrades": [
"1. 工作流引擎增强<br>* 会话变量文件格式支持:支持文件类型值及本地/远程默认值配置<br>* 列表操作节点:新增专用列表操作节点<br>* 模板转换支持 HTML扩展富内容渲染能力<br>* 表单返回与提交:工作流返回交互式表单,前端支持提交<br>* HTTP 节点 XML 响应:拓宽企业级 API 集成兼容性<br>* 开场白与文件引用:支持配置开场白及附件引用<br>* 模板转换三级变量:支持深层嵌套变量访问<br>* 节点连线添加按钮:连线处新增内联添加按钮",
"2. Agent 智能 🧠<br>* Agent 深度思考模式:支持更充分的推理以产出高质量回答<br>* 模型深度思考特性开关:模型级特性标识与应用级开关控制",
"3. 记忆系统升级 📚<br>* 用户记忆库分页:支持大规模记忆集合分页浏览<br>* RAG 用户记忆数据结构刷新:后端 API 数据结构重新设计<br>* 多模态记忆读取:支持检索图像、音频等非文本记忆<br>* 语义剪枝阈值提示文案:显示描述性区间标签",
"4. 前端与体验 🎨<br>* 技能工具删除状态展示:工具列表显示删除状态标识<br>* 仪表盘日环比数据:关键指标增加与昨日对比数据",
"5. 稳健性与缺陷修复 🔧<br>* 参数提取空值处理:优雅处理缺失数据<br>* Token 消耗展示优化:确保用量报告准确<br>* 模型参数负值修复:明确参数范围定义<br>* 应用共享删除同步:正确更新所有共享记录<br>* 记忆写入任务排序:按时间戳顺序执行<br>* 多模态模型缺失优雅处理:不再中断感知记忆写入<br>* 自定义工具 Number 变量传递:解决类型转换问题<br>* 集群子代理保存后显示:修复未反显问题<br>* 记忆开启后流式输出修复:解决字符串序列化问题",
"<br>",
"v0.2.10 标志着平台向生产成熟度迈出的重要一步。深度思考、交互式表单工作流与多模态记忆的结合展现了平台从记忆存储向综合认知基础设施的演进。我们期待 4 月 17 日 v0.3.0 发布会,届时将带来更深层的 Agent 推理能力、多智能体协作功能及记忆智能管线的进一步优化。剑已炼成,只待出鞘。",
"MemoryBear — 百炼成锋 🐻✨"
]
},
"introduction_en": {
"codeName": "LianJian",
"releaseDate": "2026-4-8",
"upgradePosition": "🐻 Comprehensive workflow engine enhancements, Agent deep thinking mode, and multimodal memory reading — forging the blade for production readiness",
"coreUpgrades": [
"1. Workflow Engine Enhancements<br>* Session Variable File Support: File-type values with local/remote defaults<br>* List Operation Node: Dedicated node for array manipulation<br>* Template Conversion HTML Support: Rich-content rendering<br>* Form Return & Submission: Interactive forms in workflow conversations<br>* HTTP Node XML Response: Enterprise API integration compatibility<br>* Opening Remarks & File References: Configurable conversation openers<br>* Template Conversion Three-Level Variables: Deep nested variable access<br>* Node Connection Add Button: Inline add button on connections",
"2. Agent Intelligence 🧠<br>* Agent Deep Thinking Mode: Thorough reasoning for complex queries<br>* Model Deep Thinking Feature Toggle: Model-level flag with per-app control",
"3. Memory System Upgrades 📚<br>* User Memory Pagination: Paginated browsing for large collections<br>* RAG User Memory Data Structure Refresh: Redesigned backend API contracts<br>* Multimodal Memory Reading: Retrieval of image, audio, and non-text memory<br>* Semantic Pruning Threshold Hints: Descriptive range labels for configuration",
"4. Frontend & Usability 🎨<br>* Skill Tool Deletion Status Display: Deletion indicators in tool list<br>* Dashboard Day-over-Day Comparison: Key metrics with yesterday comparison",
"5. Robustness & Bug Fixes 🔧<br>* Parameter Extraction Null Handling: Graceful handling of missing data<br>* Token Consumption Display Optimization: Accurate usage reporting<br>* Model Parameter Negative Value Fix: Clear parameter range definitions<br>* App Share Deletion Sync: Correct update of all share records<br>* Memory Write Task Ordering: Chronological execution per end_user<br>* Multimodal Model Missing Graceful Handling: No more interrupted writes<br>* Custom Tool Number Variable Pass-through: Type coercion fix<br>* Cluster Sub-Agent Display After Save: Fixed UI reflection<br>* Memory-Enabled Streaming Output Fix: String serialization resolved",
"<br>",
"v0.2.10 marks a significant step toward production maturity. The combination of deep thinking, interactive form workflows, and multimodal memory demonstrates the platform's evolution from memory storage to comprehensive cognitive infrastructure. We look forward to the v0.3.0 launch on April 17, bringing deeper agent reasoning, multi-agent collaboration, and further memory intelligence refinements. The blade has been forged — now it's time to wield it.",
"MemoryBear — Forging the Blade 🐻✨"
]
}
},
"v0.2.8": {
"introduction": {
"codeName": "景玉",

View File

@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="16px" height="16px" viewBox="0 0 16 16" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>参与</title>
<g id="空间里层页面优化" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="应用管理-工作流-配置-开始" transform="translate(-1173, -24)" fill="#000000" fill-rule="nonzero">
<g id="编组-11" transform="translate(1166, 17)">
<g id="参与" transform="translate(7, 7)">
<g id="编组" transform="translate(1.5, 1)">
<path d="M9.66581309,0 C11.5071324,0 12.9999203,1.50297946 12.9999203,3.35712964 L12.9999203,6.99997709 C12.9999203,7.34514783 12.7220975,7.62497504 12.3793991,7.62497504 C12.0367007,7.62497504 11.7588778,7.34514783 11.7588778,6.99997709 L11.7588778,3.35712964 C11.7588778,2.19344595 10.8218287,1.24999591 9.66581309,1.24999591 L3.33410726,1.24999591 C2.17807615,1.24999591 1.24104252,2.19344595 1.24104252,3.35712964 L1.24104252,10.6428245 C1.24104252,11.8065082 2.17809167,12.7499583 3.33410726,12.7499583 L6.04769325,12.7499583 C6.39040715,12.7499583 6.66821451,13.0297855 6.66821451,13.3749562 C6.66821451,13.720127 6.39040715,13.9999542 6.04769325,13.9999542 L3.33410726,13.9999542 C1.49278799,13.9999542 0,12.4969747 0,10.6428245 L0,3.35712964 C0,1.50297946 1.49278799,0 3.33410726,0 L9.66581309,0 Z" id="路径"></path>
<path d="M11.8585646,8.937002 C12.0448761,8.6472842 12.4290718,8.56453447 12.7167144,8.75215885 C13.0043726,8.93981449 13.0865296,9.3267976 12.9002336,9.6165154 L10.2649729,13.7147051 C10.0576723,14.0370947 9.61342558,14.0966257 9.3296457,13.8400641 L7.8566058,12.5082872 C7.60157156,12.2777254 7.58041179,11.8825705 7.80932208,11.6256963 C8.03824788,11.3688222 8.43057245,11.3475097 8.68560669,11.5780715 L9.61814154,12.4211937 L11.8585646,8.93698637 L11.8585646,8.937002 Z M9.21354617,4.09820534 C9.55624455,4.09820534 9.83406743,4.37801692 9.83406743,4.72320329 C9.83406743,5.06837404 9.55624455,5.34820125 9.21354617,5.34820125 L3.78637417,5.34820125 C3.4436758,5.34820125 3.16585292,5.06837404 3.16585292,4.72320329 C3.16585292,4.37801692 3.4436758,4.09820534 3.78637417,4.09820534 L9.21354617,4.09820534 Z M9.21354617,7.74105279 C9.55624455,7.74105279 9.83406743,8.02086437 9.83406743,8.36605074 C9.83406743,8.71122149 9.55624455,8.9910487 9.21354617,8.9910487 L3.78637417,8.9910487 C3.4436758,8.9910487 3.16585292,8.71122149 3.16585292,8.36605074 C3.16585292,8.02086437 3.4436758,7.74105279 3.78637417,7.74105279 L9.21354617,7.74105279 Z" id="形状"></path>
</g>
</g>
</g>
</g>
</g>
</svg>


View File

@@ -1,12 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="16px" height="16px" viewBox="0 0 16 16" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>参与</title>
<g id="空间里层页面优化" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="应用管理-工作流-配置-开始" transform="translate(-1211, -24)" fill="#171719" fill-rule="nonzero">
<g id="空间里层页面优化" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd" stroke-linecap="round">
<g id="应用管理-工作流-配置-开始" transform="translate(-1211, -24)" stroke="#171719" stroke-width="1.2">
<g id="编组-11" transform="translate(1204, 17)">
<g id="参与" transform="translate(7, 7)">
<g id="编组-35" transform="translate(0.5, 1.5)">
<path d="M13.3524137,3.04473843 C13.7876396,3.04473843 14.1979604,3.21975634 14.507738,3.53746403 C14.8173619,3.85501246 14.9923132,4.28005597 15,4.73408333 L15,10.2997805 C15,10.7391566 14.8365789,11.1556006 14.5400225,11.472512 C14.2665266,11.7647393 13.9083222,11.9416683 13.5224454,11.9771815 L13.5155273,13.3373525 C13.5155273,13.6047366 13.3547197,13.8526919 13.1155068,13.9536577 C13.021728,13.9861451 12.9450138,14 12.8673773,14 C12.6896587,14 12.521318,13.9261071 12.40494,13.797113 L10.6609614,11.9676263 L8.48098801,11.9676263 C8.12370606,11.9676263 7.83314543,11.6666401 7.83314543,11.2965385 C7.83314543,10.926437 8.12370606,10.6254507 8.48098801,10.6254507 L10.9635134,10.6254507 C11.1415394,10.6459942 11.2911243,10.7176576 11.3904376,10.8283378 L12.2272215,11.7015163 L12.2272215,11.2966978 C12.2272215,10.9371068 12.5239315,10.6334133 12.8750641,10.6334133 L13.3674798,10.6334133 C13.5491954,10.6334133 13.6969355,10.4803722 13.6969355,10.2921364 L13.6969355,4.72819101 C13.6969355,4.53995518 13.5491954,4.38691404 13.3674798,4.38691404 C13.0060469,4.38691404 12.7121041,4.08592781 12.7121041,3.71582623 C12.7121041,3.34588391 12.9994363,3.04473843 13.3524137,3.04473843 Z M10.4203649,0 C11.3164907,0 12.0455058,0.755172845 12.0455058,1.68345258 L12.0455058,8.25976271 C12.0455058,9.18804245 11.3131085,9.94305605 10.4129855,9.94305605 L5.13154658,9.94305605 L2.58091627,12.7683453 C2.45792764,12.9048242 2.29081685,12.9799911 2.11017731,12.9799911 C2.03346315,12.9799911 1.94675618,12.9634289 1.87234806,12.9344451 C1.62268115,12.8328423 1.462181,12.5875943 1.462181,12.3089033 L1.46940658,9.93604896 C1.08614328,9.89719148 0.730552424,9.71962553 0.459055037,9.42946844 C0.163113662,9.11319403 0,8.69770563 0,8.25960346 L0,1.68345258 C0,0.755172845 0.729015066,0 1.62514092,0 L10.4203649,0 Z M10.4208261,1.33453151 L1.62560213,1.33453151 C1.44388644,1.33453151 1.29614636,1.48757266 1.29614636,1.67580849 L1.29614636,8.25976271 C1.29614636,8.44799854 1.44388644,8.60103969 1.62560213,8.60103969 L2.14030952,8.60103969 C2.31449216,8.60103969 2.48329405,8.67588811 2.60320795,8.80663398 C2.72066209,8.93451331 2.78092651,9.10061312 2.77323973,9.27467552 C2.77323973,9.29060072 2.77293225,9.31273675 2.76570667,9.34156135 L2.75924977,10.5990149 L4.38623552,8.79532709 C4.40698985,8.76475071 4.43450856,8.74850701 4.45295685,8.73879264 L4.47217382,8.72557473 C4.48293533,8.71872689 4.49246695,8.71171981 4.50276724,8.70407571 C4.52905606,8.68464697 4.56180178,8.66028142 4.60638516,8.64483398 L4.65634929,8.63161607 L4.67326022,8.63161607 C4.70446859,8.62078693 4.73721431,8.61362059 4.76857641,8.60788752 L4.78302757,8.59291784 L10.4208261,8.59291784 C10.6025418,8.59291784 10.7502818,8.43987669 10.7502818,8.25164086 L10.7502818,1.67580849 C10.7502818,1.48757266 10.6025418,1.33453151 10.4208261,1.33453151 Z M3.1173004,4.58263471 C3.34559803,4.58263471 3.55975197,4.70939928 3.67612996,4.9132418 C3.85062007,5.22505716 4.19252844,5.51999181 4.63943835,5.74469634 C5.09157528,5.9719489 5.59213898,6.09712095 6.04904171,6.09712095 C6.99467049,6.09712095 8.05882956,5.56983768 8.42149226,4.92168216 C8.53833145,4.71688412 8.7524854,4.59011955 8.98078303,4.59011955 C9.1026955,4.59011955 9.21599877,4.62181069 9.30870145,4.68184869 C9.45797889,4.76513747 9.56497899,4.908942 9.61033104,5.08714495 C9.64815005,5.27283275 9.62339859,5.4460989 9.53976632,5.59356622 C8.85594957,6.7876375 7.23096239,7.43165247 6.04904171,7.43165247 C4.85989546,7.43165247 3.22968125,6.78477096 2.55078405,5.58608138 C2.46745926,5.43256248 2.4448601,5.24862645 
2.48775238,5.07966011 C2.53125961,4.90830499 2.63549247,4.76816326 2.78892077,4.67468235 C2.89146254,4.61273333 2.99877011,4.58263471 3.1173004,4.58263471 Z" id="形状结合"></path>
<g id="编组-35" transform="translate(1, 2)">
<path d="M1.5,0 L9.5,0 C10.3284271,-2.22044605e-16 11,0.671572875 11,1.5 L11,7.5 C11,8.32842712 10.3284271,9 9.5,9 L4.16268077,9 L4.16268077,9 L1.61845449,11.7671841 L1.61845449,9 L1.5,9 C0.671572875,9 2.22044605e-16,8.32842712 2.22044605e-16,7.5 L0,1.5 C0,0.671572875 0.671572875,-2.22044605e-16 1.5,-2.22044605e-16 Z" id="矩形" stroke-linejoin="round"></path>
<path d="M14,10.7913467 L14,10.7913467 L11.889042,10.7913467 L9.95430953,12.7671841 L9.95430953,10.7913467 L9.33585504,10.7913467 C8.78357029,10.7913467 8.33585504,10.3436314 8.33585504,9.79134668 L8.33585504,4.21772464 C8.33585504,3.66543989 8.78357029,3.21772464 9.33585504,3.21772464 L9.35694395,3.21772464 L9.35694395,3.21772464" id="路径" stroke-linejoin="round" transform="translate(11.1679, 7.9925) scale(-1, 1) translate(-11.1679, -7.9925)"></path>
<path d="M2.57312498,4.7624569 C3.0405519,5.21866557 5.40029607,7.88187743 8.56554211,4.7624569" id="路径-22"></path>
</g>
</g>
</g>


View File

@@ -2,7 +2,7 @@
* @Author: ZhaoYing
* @Date: 2025-12-10 16:46:17
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-04-08 11:23:18
* @Last Modified time: 2026-04-10 18:46:57
*/
import { type FC, useRef, useEffect, useState } from 'react'
import clsx from 'clsx'
@@ -217,7 +217,7 @@ const ChatContent: FC<ChatContentProps> = ({
{/* Message bubble */}
<div className={clsx('rb:text-left rb:leading-5 rb:inline-block rb:wrap-break-word rb:relative', item.role === 'user' ? contentClassNames : '', {
// Error message style (uncompleted status, or assistant message with null content)
'rb:bg-[rgba(255,93,52,0.08)] rb:text-[#FF5D34]': (item.status && item.status !== 'completed') || (errorDesc && item.role === 'assistant' && item.content === null && !renderRuntime),
'rb:text-[#FF5D34]': (item.status && item.status !== 'completed') || (errorDesc && item.role === 'assistant' && item.content === null && !renderRuntime),
// Assistant message style
'rb:bg-[#E3EBFD] rb:p-[10px_12px_2px_12px] rb:rounded-lg rb:max-w-130': item.role === 'user',
'rb:max-w-full rb:w-full': item.role === 'assistant',

View File

@@ -12,6 +12,14 @@
font-weight: 500;
font-style: normal;
}
.breadcrumbTitle {
display: inline-block;
max-width: 200px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
vertical-align: bottom;
}
.header :global(.ant-breadcrumb) {
line-height: 31px;
}

View File

@@ -2,7 +2,7 @@
* @Author: ZhaoYing
* @Date: 2026-02-02 15:07:49
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-04-07 12:18:58
* @Last Modified time: 2026-04-16 11:10:19
*/
/**
* AppHeader Component
@@ -14,7 +14,7 @@
*/
import { type FC, useRef, useState } from 'react';
import { Layout, Dropdown, Breadcrumb, Flex } from 'antd';
import { Layout, Dropdown, Breadcrumb, Flex, Tooltip } from 'antd';
import type { MenuProps, BreadcrumbProps } from 'antd';
import { useTranslation } from 'react-i18next';
import { useLocation } from 'react-router-dom';
@@ -76,9 +76,11 @@ const AppHeader: FC<{source?: 'space' | 'manage';}> = ({source = 'manage'}) => {
const userMenuItems: MenuProps['items'] = [
{
key: '1',
icon: <Flex align="center" justify="center" className="rb:size-10 rb:rounded-xl rb:bg-[#155EEF] rb:text-white">
{/[\u4e00-\u9fa5]/.test(user.username) ? user.username.slice(0, 2) : user.username?.[0]}
</Flex>,
icon: user.username
? <Flex align="center" justify="center" className="rb:size-10 rb:rounded-xl rb:bg-[#155EEF] rb:text-white">
{/[\u4e00-\u9fa5]/.test(user.username) ? user.username.slice(-2) : user.username[0]}
</Flex>
: null,
label: (<>
<div className="rb:text-[#212332] rb:leading-5">{user.username}</div>
<div className="rb:text-[12px] rb:text-[#7B8085] rb:leading-4.5 rb:mt-0.5 rb:mr-2">{user.email}</div>
@@ -135,28 +137,30 @@ const AppHeader: FC<{source?: 'space' | 'manage';}> = ({source = 'manage'}) => {
* - Disables navigation for the last breadcrumb item
*/
const formatBreadcrumbNames = () => {
return breadcrumbs.filter(item => item.type !== 'group').map((menu, index) => {
const filtered = breadcrumbs.filter(item => item.type !== 'group');
return filtered.map((menu, index) => {
const label = menu.i18nKey ? t(menu.i18nKey) : menu.label;
const isLast = index === filtered.length - 1;
const item: any = {
title: menu.i18nKey ? t(menu.i18nKey) : menu.label,
title: (
<Tooltip title={label} placement="bottom">
<span className={styles.breadcrumbTitle}>{label}</span>
</Tooltip>
),
};
// If it's the last item, don't set path
if (index === breadcrumbs.length - 1) {
return item;
if (!isLast) {
if ((menu as any).onClick) {
item.onClick = (e: React.MouseEvent) => {
e.preventDefault();
(menu as any).onClick(e);
};
item.href = '#';
} else if (menu.path && menu.path !== '#') {
item.path = menu.path;
}
}
// If it has a custom onClick, use it and set href to '#' to show a pointer cursor
if ((menu as any).onClick) {
item.onClick = (e: React.MouseEvent) => {
e.preventDefault();
(menu as any).onClick(e);
};
item.href = '#';
} else if (menu.path && menu.path !== '#') {
// Only set path when path is not '#'
item.path = menu.path;
}
return item;
});
}
@@ -179,9 +183,9 @@ const AppHeader: FC<{source?: 'space' | 'manage';}> = ({source = 'manage'}) => {
overlayClassName={styles.userDropdown}
>
<Flex align="center" className="rb:cursor-pointer rb:font-medium">
<Flex align="center" justify="center" className="rb:size-8 rb:rounded-xl rb:bg-[#155EEF] rb:text-white rb:mr-2!">
{/[\u4e00-\u9fa5]/.test(user.username) ? user.username.slice(user.username.length, -2) : user.username[0]}
</Flex>
{user.username && <Flex align="center" justify="center" className="rb:size-8 rb:rounded-xl rb:bg-[#155EEF] rb:text-white rb:mr-2!">
{/[\u4e00-\u9fa5]/.test(user.username) ? user.username.slice(-2) : user.username[0]}
</Flex>}
<span className="rb:text-[#212332] rb:text-[12px] rb:leading-4 rb:mr-1">{user.username}</span>
<div className={clsx("rb:size-3 rb:bg-cover rb:bg-[url('@/assets/images/common/arrow_up.svg')]", {
'rb:rotate-180': !open,

View File

@@ -116,7 +116,7 @@ export const en = {
prompt: 'Prompt Engineering',
skills: 'Skill Library',
workbench: 'Workbench',
memoryRelated: 'Memory-Related',
memoryRelated: 'Memory Hub',
advancedSettings: 'Advanced Settings',
promptHistory: 'My history',
platformManagement: 'Platform Management',
@@ -1396,6 +1396,43 @@ export const en = {
pleaseUploadFile: 'Please upload file',
setting: 'Settings',
features: 'Conversation Features',
checkList: 'Check List',
checkListDesc: 'Ensure all issues are resolved before publishing',
checkListEmpty: 'No issues found',
notConnected: 'This node is not connected to other nodes',
goto: 'Go to',
cannotBeEmpty: 'cannot be empty',
checkListErrors: {
'llm.model_id': 'Model',
'llm.messages': 'Messages',
'end.output': 'Output',
'knowledge-retrieval.knowledge_retrieval': 'Knowledge bases',
'parameter-extractor.model_id': 'Model',
'parameter-extractor.text': 'Input variable',
'parameter-extractor.params': 'Params',
'memory-read.message': 'Message',
'memory-read.config_id': 'Memory config',
'memory-read.search_switch': 'Search mode',
'memory-write.messages': 'Messages',
'memory-write.config_id': 'Memory config',
'if-else.cases': 'Condition',
'question-classifier.model_id': 'Model',
'question-classifier.input_variable': 'Input variable',
'question-classifier.categories': 'Categories',
'iteration.input': 'Input variable',
'iteration.output': 'Output variable',
'var-aggregator.group_variables': 'Variables',
'assigner.assignments': 'Variables',
'http-request.url': 'API URL',
'http-request.body.data': 'Binary file variable',
'code.input_variables': 'Input variables',
'code.code': 'Code',
'code.output_variables': 'Output variables',
'jinja-render.mapping': 'Input variables',
'jinja-render.template': 'Template',
'document-extractor.file_selector': 'File variable',
'list-operator.input_list': 'Input list',
},
file_upload: 'File Upload',
file_upload_desc: 'The chat input box supports file uploads. Types include images, documents, and other types',
settings: 'File Upload Settings',
@@ -1459,6 +1496,32 @@ export const en = {
resetFeaturesTip: 'Please reconfigure the [Conversation Features - File Upload] settings',
logTitle: 'Description',
range: 'Range',
body: 'BODY Parameter Example',
bodyRequestExample: `{
"message": "user message content",
// string, required, the conversation content entered by the user;
"conversation_id": "conversation_id",
// string, optional, session ID; for multi-turn conversations, pass the conversation_id from the previous response; omit on first request;
"user_id": "user_id",
// string, optional, end-user identifier to distinguish memory and sessions across users; recommended to pass your business system user ID;
"variables": {},
// object, optional (requires application configuration to take effect);
"stream": false,
// boolean, optional, whether to stream the response; defaults to false; when true, returns an SSE event stream;
"thinking": false,
// boolean, optional, whether to enable deep thinking; defaults to false (requires application configuration when true);
"files": [],
// array, optional, list of multimodal attachments (requires application configuration to take effect);
"version":"app_release_id"
// string, optional, application version ID; specify a historical release version ID, or omit to use the currently active version;
}`,
},
userMemory: {
userMemory: 'User Memory',
@@ -2113,6 +2176,19 @@ Memory Bear: After the rebellion, regional warlordism intensified for several re
return_text_position_enable: 'Return Text Position Info',
return_text_position_enable_desc: 'Whether to return coordinate positions of recognized text',
OpenClawTool_desc: 'OpenClaw Remote Agent',
OpenClawTool_features: 'OpenClaw Remote Agent — 3D Printing and Device Management',
OpenClawTool_config_desc: 'Configure OpenClaw Gateway connection. Server URL and API Key are required.',
OpenClawTool_server_url_desc: 'OpenClaw Gateway server URL, e.g. http://xxx.xxx.xxx.xx:xxx',
OpenClawTool_api_key_desc: 'OpenClaw API Key, created in OpenClaw admin console',
OpenClawTool_agent_id_desc: 'Target Agent ID, defaults to main, usually no need to change',
OpenClawTool_enable: 'Enable OpenClaw',
agent_id: 'Agent ID',
'3dPrinting': '3D Printing',
deviceManagement: 'Device Management',
multimodalInteraction: 'Multimodal Interaction',
remoteAgent: 'Remote Agent',
addCustom: 'Add Custom Tool',
editCustom: 'Edit Custom Tool',
schema: 'Schema',
@@ -2189,6 +2265,7 @@ Memory Bear: After the rebellion, regional warlordism intensified for several re
addvariable: 'Chat Variables',
addChatVariable: 'Add Chat Variable',
editChatVariable: 'Edit Chat Variable',
invalidJSON: 'Invalid JSON format',
config: {
llm: {
@@ -2291,6 +2368,11 @@ Memory Bear: After the rebellion, regional warlordism intensified for several re
"eq": 'Is',
"ne": 'Is Not',
},
file: {
"empty": 'Not Exist',
"not_empty": 'Exists',
eq: 'All Are'
},
else_desc: 'Used to define the logic that should be executed when the if condition is not met.',
unset: 'Condition Not Set',
set: 'Set',
@@ -2403,7 +2485,8 @@ Memory Bear: After the rebellion, regional warlordism intensified for several re
value: 'Value',
addCase: 'Add Condition',
addVariable: 'Add Variables',
output: 'Output Variable'
output: 'Output Variable',
duplicateName: 'Variable name cannot be duplicated',
},
clear: 'Clear',
@@ -2429,7 +2512,49 @@ Memory Bear: After the rebellion, regional warlordism intensified for several re
iteration: 'Iteration',
input_cycle_vars: 'Initial Loop Variables',
output_cycle_vars: 'Final Loop Variables',
}
},
sureReplace: 'Confirm Replace',
checkList: 'Check List',
checkListDesc: 'Ensure all issues are resolved before publishing',
checkListEmpty: 'No issues found',
notConnected: 'This node is not connected to other nodes',
goto: 'Go to',
cannotBeEmpty: 'cannot be empty',
checkListErrors: {
'llm.model_id': 'Model',
'llm.messages': 'Messages',
'end.output': 'Output',
'knowledge-retrieval.knowledge_retrieval': 'Knowledge bases',
'parameter-extractor.model_id': 'Model',
'parameter-extractor.text': 'Input variable',
'parameter-extractor.params': 'Params',
'memory-read.message': 'Message',
'memory-read.config_id': 'Memory config',
'memory-read.search_switch': 'Search mode',
'memory-write.messages': 'Messages',
'memory-write.config_id': 'Memory config',
'if-else.cases': 'Condition',
'question-classifier.model_id': 'Model',
'question-classifier.input_variable': 'Input variable',
'question-classifier.categories': 'Categories',
'iteration.input': 'Input variable',
'iteration.output': 'Output variable',
'var-aggregator.group_variables': 'Variables',
'assigner.assignments': 'Variables',
'http-request.url': 'API URL',
'http-request.body.data': 'Binary file variable',
'code.input_variables': 'Input variables',
'code.code': 'Code',
'code.output_variables': 'Output variables',
'jinja-render.mapping': 'Input variables',
'jinja-render.template': 'Template',
'document-extractor.file_selector': 'File variable',
'list-operator.input_list': 'Input list',
},
checkListHasErrors: 'Please resolve all issues in the checklist before publishing',
variableSelect: {
empty: 'No variables available',
},
},
emotionEngine: {
emotionEngineConfig: 'Emotion Engine Configuration',

View File

@@ -116,7 +116,7 @@ export const zh = {
prompt: '提示词工程',
skills: '技能库',
workbench: '工作台',
memoryRelated: '记忆相关',
memoryRelated: '记忆中枢',
advancedSettings: '高级设置',
promptHistory: '我的历史',
platformManagement: '平台管理',
@@ -831,6 +831,32 @@ export const zh = {
resetFeaturesTip: '请重新配置【对话功能-文件上传】功能',
logTitle: '描述',
range: '范围',
body: 'BODY 参数示例',
bodyRequestExample: `{
"message": "用户消息内容",
// string必填用户输入的对话内容
"conversation_id": "conversation_id",
// string可选会话ID多轮对话时传上一次返回的conversation_id首次不传
"user_id": "user_id",
// string可选终端用户标识用于区分不同用户的记忆和会话建议传业务系统的用户ID
"variables": {},
// object可选需要应用配置才支持生效
"stream": false,
// boolean可选是否流式返回默认 falsetrue时返回SSE事件流
"thinking": false,
// boolean可选是否启用深度思考默认 falsetrue时需要应用配置才支持生效
"files": [],
// array可选多模态附件列表需要应用配置才支持生效
"version":"app_release_id"
//string可选应用版本ID指定历史发布版本ID不传则使用当前生效版本
}`,
},
table: {
totalRecords: '共 {{total}} 条记录'
@@ -2109,6 +2135,21 @@ export const zh = {
return_text_position_enable: '返回文本位置信息',
return_text_position_enable_desc: '是否返回识别文字的坐标位置',
OpenClawTool_desc: 'OpenClaw远程Agent',
OpenClawTool_features: 'OpenClaw远程Agent — 3D打印控制、设备管理等',
OpenClawTool_config_desc: '配置OpenClaw Gateway连接信息需要提供服务地址和API Key。',
OpenClawTool_server_url_desc: 'OpenClaw Gateway 服务地址,如 http://xxx.xxx.xxx.xx:xxx',
OpenClawTool_api_key_desc: 'OpenClaw API Key在 OpenClaw 管理后台创建',
OpenClawTool_agent_id_desc: '目标 Agent ID默认为 main通常无需修改',
OpenClawTool_enable: '启用 OpenClaw',
agent_id: 'Agent ID',
'3dPrinting': '3D 打印',
deviceManagement: '设备管理',
multimodalInteraction: '多模态交互',
remoteAgent: '远程 Agent',
addCustom: '添加自定义工具',
editCustom: '编辑自定义工具',
schema: 'Schema',
@@ -2185,6 +2226,7 @@ export const zh = {
addvariable: '会话变量',
addChatVariable: '添加会话变量',
editChatVariable: '编辑会话变量',
invalidJSON: 'JSON 格式不正确',
config: {
llm: {
@@ -2287,6 +2329,11 @@ export const zh = {
"eq": '是',
"ne": '不是',
},
file: {
"empty": '不存在',
"not_empty": '存在',
eq: '全都是'
},
else_desc: '用于定义当 if 条件不满足时应执行的逻辑。',
unset: '条件未设置',
set: '已设置',
@@ -2402,7 +2449,8 @@ export const zh = {
value: '值',
addCase: '添加条件',
addVariable: '添加变量',
output: '输出变量'
output: '输出变量',
duplicateName: '变量名不能重复',
},
clear: '清空',
@@ -2430,6 +2478,47 @@ export const zh = {
output_cycle_vars: '最终循环变量',
},
sureReplace: '确认替换',
checkList: '检查清单',
checkListDesc: '发布前确保所有问题均已解决',
checkListEmpty: '没有发现问题',
notConnected: '此节点尚未连接到其他节点',
goto: '转到',
cannotBeEmpty: '不能为空',
checkListErrors: {
'llm.model_id': '模型',
'llm.messages': '提示词',
'end.output': '回复',
'knowledge-retrieval.knowledge_retrieval': '知识库',
'parameter-extractor.model_id': '模型',
'parameter-extractor.text': '输入变量',
'parameter-extractor.params': '提取参数',
'memory-read.message': '消息',
'memory-read.config_id': '记忆配置',
'memory-read.search_switch': '检索模式',
'memory-write.messages': '消息',
'memory-write.config_id': '记忆配置',
'if-else.cases': '条件',
'question-classifier.model_id': '模型',
'question-classifier.input_variable': '输入变量',
'question-classifier.categories': '分类',
'iteration.input': '输入变量',
'iteration.output': '输出变量',
'var-aggregator.group_variables': '变量',
'assigner.assignments': '变量',
'http-request.url': 'API URL',
'http-request.body.data': 'binary文件类型变量',
'code.input_variables': '输入变量',
'code.code': '代码',
'code.output_variables': '输出变量',
'jinja-render.mapping': '输入变量',
'jinja-render.template': '模板',
'document-extractor.file_selector': '文件变量',
'list-operator.input_list': '输入变量',
},
checkListHasErrors: '发布前确认检查清单中所有问题均已解决',
variableSelect: {
empty: '暂无变量',
},
},
emotionEngine: {
emotionEngineConfig: '情感引擎配置',

web/src/store/workflow.ts Normal file
View File

@@ -0,0 +1,21 @@
/*
* @Author: ZhaoYing
* @Date: 2026-04-10 18:11:19
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-04-10 18:11:19
*/
import { create } from 'zustand'
import type { NodeCheckResult } from '@/views/Workflow/components/CheckList'
interface WorkflowState {
checkResults: Record<string, NodeCheckResult[]>
setCheckResults: (appId: string, results: NodeCheckResult[]) => void
getCheckResults: (appId: string) => NodeCheckResult[]
}
export const useWorkflowStore = create<WorkflowState>((set, get) => ({
checkResults: {},
setCheckResults: (appId, results) =>
set(state => ({ checkResults: { ...state.checkResults, [appId]: results } })),
getCheckResults: (appId) => get().checkResults[appId] ?? [],
}))
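// Usage sketch (assumed call sites): the workflow editor records checklist
// results and the release page reads them back before publishing, e.g.:
//   const { setCheckResults, getCheckResults } = useWorkflowStore()
//   setCheckResults(appId, results)
//   const errors = getCheckResults(appId) // [] when nothing was recorded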

View File

@@ -420,4 +420,7 @@ body {
.ant-picker-outlined:focus,
.ant-picker-outlined:focus-within {
box-shadow: none;
}
.ͼ1.cm-focused {
outline: none;
}

View File

@@ -229,7 +229,11 @@ const Agent = forwardRef<AgentRef, { onFeaturesLoad?: (features: FeaturesConfigF
...knowledgeRest,
knowledge_bases: knowledge_bases.map(item => ({
kb_id: item.kb_id || item.id,
...(item.config || {})
retrieve_type: item.retrieve_type,
top_k: item.top_k,
similarity_threshold: item.similarity_threshold,
vector_similarity_weight: item.vector_similarity_weight,
// ...(item.config || {})
}))
} as KnowledgeConfig : null,
tools: tools.map(vo => {

View File

@@ -2,7 +2,7 @@
* @Author: ZhaoYing
* @Date: 2026-02-03 16:29:29
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-03-26 15:31:36
* @Last Modified time: 2026-04-10 18:09:56
*/
import { type FC, useState, useRef, useEffect } from 'react';
import clsx from 'clsx';
@@ -18,6 +18,7 @@ import ApiKeyConfigModal from './components/ApiKeyConfigModal';
import { getApiKeyList, getApiKeyStats, deleteApiKey } from '@/api/apiKey';
import { maskApiKeys } from '@/utils/apiKeyReplacer'
import RbCard from '@/components/RbCard/Card';
import CodeMirrorEditor from '@/components/CodeMirrorEditor'
/**
* API configuration page component
@@ -155,6 +156,21 @@ const Api: FC<{ application: Application | null }> = ({ application }) => {
{t('common.copy')}
</Button>
</Flex>
<div className="rb:font-medium rb:mt-4!">
{t('application.body')}
</div>
<Flex align="start" justify="space-between" className="rb:text-[#5B6167] rb:mt-3! rb:py-2! rb:px-4! rb:bg-white rb-border rb:rounded-lg rb:leading-5">
<CodeMirrorEditor readOnly={true} value={t('application.bodyRequestExample')} />
<Button className="rb:px-2! rb:h-7! rb:group" onClick={() => handleCopy(t('application.bodyRequestExample'))}>
<div
className="rb:w-4 rb:h-4 rb:cursor-pointer rb:bg-cover rb:bg-[url('@/assets/images/copy.svg')] rb:group-hover:bg-[url('@/assets/images/copy_active.svg')]"
></div>
{t('common.copy')}
</Button>
</Flex>
</RbCard>
<RbCard
title={() => (<Flex align="center">

View File

@@ -2,12 +2,13 @@
* @Author: ZhaoYing
* @Date: 2026-02-03 16:29:41
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-03-26 15:24:41
* @Last Modified time: 2026-04-10 17:02:07
*/
import { type FC, useState, useEffect, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import clsx from 'clsx';
import { Space, Input, Form, App, Flex } from 'antd';
import copy from 'copy-to-clipboard';
import Tag, { type TagProps } from './components/Tag'
import RbCard from '@/components/RbCard/Card'
@@ -17,6 +18,7 @@ import ReleaseShareModal from './components/ReleaseShareModal'
import AppSharingModal from './components/AppSharingModal'
import type { Release, ReleaseModalRef, ReleaseShareModalRef, AppSharingModalRef } from './types'
import type { Application } from '@/views/ApplicationManagement/types'
import { useWorkflowStore } from '@/store/workflow'
import Empty from '@/components/Empty'
import { formatDateTime } from '@/utils/format';
import Markdown from '@/components/Markdown'
@@ -40,6 +42,7 @@ const heightClass = 'rb:max-h-[calc(100vh-140px)]'
const ReleasePage: FC<{data: Application; refresh: () => void}> = ({data, refresh}) => {
const { t } = useTranslation();
const { message } = App.useApp()
const { getCheckResults } = useWorkflowStore()
const releaseModalRef = useRef<ReleaseModalRef>(null)
const releaseShareModalRef = useRef<ReleaseShareModalRef>(null)
const appSharingModalRef = useRef<AppSharingModalRef>(null)
@@ -75,6 +78,10 @@ const ReleasePage: FC<{data: Application; refresh: () => void}> = ({data, refres
if (!selectedVersion) return
appExport(data.id, data.name, { release_id: selectedVersion.id})
}
const handleCopy = (id: string) => {
copy(id)
message.success(t('common.copySuccess'))
}
return (
<Flex gap={12}>
<div className="rb:w-101 rb:h-full">
@@ -102,7 +109,7 @@ const ReleasePage: FC<{data: Application; refresh: () => void}> = ({data, refres
</Tag>}
</>}
className={clsx("rb:hover:shadow-[0px_2px_8px_0px_rgba(0,0,0,0.2)]! rb:cursor-pointer rb:bg-white", {
'rb:border-[#171719]!': version.id === selectedVersion.id,
'rb:border! rb:border-[#171719]!': version.id === selectedVersion.id,
'rb:border-[#DFE4ED] ': version.id !== selectedVersion.id
})}
headerType="borderless"
@@ -140,13 +147,30 @@ const ReleasePage: FC<{data: Application; refresh: () => void}> = ({data, refres
<RbButton type="primary" ghost onClick={() => releaseShareModalRef.current?.handleOpen()}>{t('application.share')}</RbButton>
{data?.type !== 'multi_agent' && <RbButton type="primary" ghost onClick={() => appSharingModalRef.current?.handleOpen()}>{t('application.sharing')}</RbButton>}
</>}
<RbButton type="primary" onClick={() => releaseModalRef.current?.handleOpen()}>{t('application.release')}</RbButton>
<RbButton type="primary" onClick={async () => {
if (data?.type === 'workflow') {
const errors = getCheckResults(data.id)
if (errors.length) {
message.error(t('workflow.checkListHasErrors'))
return
}
}
releaseModalRef.current?.handleOpen()
}}>{t('application.release')}</RbButton>
</Space>
</Flex>
{selectedVersion &&
<Flex gap={16} vertical className={`${heightClass} rb:overflow-y-auto`}>
<RbCard
title={t('application.VersionInformation')}
title={() => <Flex>{t('application.VersionInformation')}
<Flex align="center" className="rb:text-[#5B6167] rb:text-[12px]">
(ID: {selectedVersion.id}
<div className="rb:size-4.5 rb:ml-1 rb:cursor-pointer rb:bg-cover rb:bg-[url('@/assets/images/common/copy_dark.svg')]"
onClick={() => handleCopy(selectedVersion.id)}
></div>
)
</Flex>
</Flex>}
headerType="borderless"
>
<div className="rb:grid rb:grid-cols-3 rb:gap-4">

View File

@@ -2,7 +2,7 @@
* @Author: ZhaoYing
* @Date: 2026-02-03 16:27:39
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-03-31 15:02:07
* @Last Modified time: 2026-04-10 18:51:43
*/
/**
* Chat debugging component for application testing
@@ -291,7 +291,6 @@ const Chat: FC<ChatProps> = ({
addAssistantMessage()
const handleStreamMessage = (data: SSEMessage[]) => {
setCompareLoading(false)
data.map(item => {
const { model_config_id, conversation_id, content, message_length, audio_url, citations } = item.data as {
@@ -306,12 +305,21 @@ const Chat: FC<ChatProps> = ({
switch (item.event) {
case 'model_reasoning':
if (compareLoading) {
setCompareLoading(false)
}
updateAssistantReasoningMessage(content, model_config_id, conversation_id)
break;
case 'model_message':
if (compareLoading) {
setCompareLoading(false)
}
updateAssistantMessage(content, model_config_id, conversation_id, audio_url)
break;
case 'model_end':
if (compareLoading) {
setCompareLoading(false)
}
const idToPoll = `${model_config_id}_${audio_url}`
if (audio_url && !audioStatusMap[idToPoll]) {
setAudioStatusMap(prev => ({
@@ -352,6 +360,9 @@ const Chat: FC<ChatProps> = ({
updateErrorAssistantMessage(message_length, model_config_id)
break;
case 'compare_end':
if (compareLoading) {
setCompareLoading(false)
}
setLoading(false);
break;
}
@@ -473,7 +484,6 @@ const Chat: FC<ChatProps> = ({
addClusterAssistantMessage()
const handleStreamMessage = (data: SSEMessage[]) => {
setCompareLoading(false)
data.map(item => {
const { conversation_id, content, message_length } = item.data as { conversation_id: string, content: string, message_length: number };
@@ -485,15 +495,24 @@ const Chat: FC<ChatProps> = ({
}
break
case 'message':
if (compareLoading) {
setCompareLoading(false)
}
updateClusterAssistantMessage(content)
if (conversation_id && conversationId !== conversation_id) {
setConversationId(conversation_id);
}
break;
case 'model_end':
if (compareLoading) {
setCompareLoading(false)
}
updateClusterErrorAssistantMessage(message_length)
break;
case 'compare_end':
if (compareLoading) {
setCompareLoading(false)
}
setLoading(false);
break;
}

View File

@@ -4,7 +4,7 @@
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-04-07 16:28:33
*/
import { type FC, useRef, useMemo, useCallback } from 'react';
import { type FC, useRef, useMemo } from 'react';
import { useNavigate, useParams } from 'react-router-dom';
import { Tabs, Dropdown, Flex, Popover } from 'antd';
import type { MenuProps } from 'antd';
@@ -18,6 +18,7 @@ import type { CopyModalRef, AgentRef, ClusterRef, WorkflowRef, FeaturesConfigFor
import { deleteApplication, appExport } from '@/api/application'
import CopyModal from './CopyModal'
import PageHeader from '@/components/Layout/PageHeader'
import CheckList from '@/views/Workflow/components/CheckList'
/**
* Tab keys for application configuration
@@ -206,6 +207,7 @@ const ConfigHeader: FC<ConfigHeaderProps> = ({
</Flex>}
extra={application?.type === 'workflow' && source !== 'sharing' && activeTab === 'arrangement'
? <Flex align="center" justify="end" gap={10} className="rb:h-8">
<CheckList workflowRef={workflowRef} appId={application?.id ?? ''} />
<Popover content={t('application.features')} classNames={{ body: 'rb:py-0.5! rb:px-1! rb:rounded-[6px]! rb:text-[12px]!' }}>
<div
className="rb:cursor-pointer rb:size-7.5 rb:border rb:border-[#EBEBEB] rb:hover:bg-[#F6F6F6] rb:rounded-[10px] rb:bg-[url('@/assets/images/workflow/features.svg')] rb:bg-size-[16px_16px] rb:bg-center rb:bg-no-repeat"

View File

@@ -2,7 +2,7 @@
* @Author: ZhaoYing
* @Date: 2026-03-05
* @Last Modified by: ZhaoYing
* @Last Modified time: 2026-04-07 16:58:10
* @Last Modified time: 2026-04-13 15:13:36
*/
import { forwardRef, useImperativeHandle, useState } from 'react';
import { Button, Form, Input, Flex, App } from 'antd';
@@ -36,8 +36,6 @@ const OpenStatementSettingModal = forwardRef<OpenStatementSettingModalRef, OpenS
const [visible, setVisible] = useState(false);
const [form] = Form.useForm<FeaturesConfigForm['opening_statement']>();
console.log('chatVariables', chatVariables)
const handleClose = () => {
setVisible(false);
form.resetFields();

View File

@@ -117,6 +117,7 @@ const Knowledge: FC<{value?: KnowledgeConfig; onChange?: (config: KnowledgeConfi
const list = [...knowledgeList]
list[index] = {
...list[index],
...values,
config: {...values as KnowledgeConfigForm}
}
setKnowledgeList([...list])

View File

@@ -33,7 +33,7 @@ interface KnowledgeConfigModalProps {
* Available retrieval types
*/
const retrieveTypes: RetrieveType[] = ['participle', 'semantic', 'hybrid',
// 'graph'
'graph'
]
/**

View File

@@ -88,6 +88,10 @@ const KnowledgeListModal = forwardRef<KnowledgeModalRef, KnowledgeModalProps>(({
const handleSave = () => {
refresh(selectedRows.map(item => ({
...item,
similarity_threshold: 0.7,
retrieve_type: "hybrid",
top_k: 3,
weight: 1,
config: {
similarity_threshold: 0.7,
retrieve_type: "hybrid",

View File

@@ -155,12 +155,10 @@ const ModelConfigModal = forwardRef<ModelConfigModalRef, ModelConfigModalProps>(
</FormItem>
{['model', 'chat'].includes(source) && <>
<FormItem name="capability" hidden />
{(values?.deep_thinking || values?.capability?.includes('thinking')) && (
<FormItem name="deep_thinking" valuePropName="checked">
<Checkbox>{t('application.deep_thinking')}</Checkbox>
</FormItem>
)}
</>}
<FormItem name="deep_thinking" valuePropName="checked" hidden={!['model', 'chat'].includes(source) || !(values?.deep_thinking || values?.capability?.includes('thinking'))}>
<Checkbox>{t('application.deep_thinking')}</Checkbox>
</FormItem>
{source === 'chat' && <FormItem name="label" hidden />}

View File

@@ -12,6 +12,7 @@ import type { ChatVariable, GraphRef, WorkflowConfig } from '@/views/Workflow/ty
import type { ApiKey } from '@/views/ApiKeyManagement/types'
import type { SkillConfigForm } from './components/Skill/types'
import type { Capability } from '@/views/ModelManagement/types'
import { Node } from '@antv/x6';
/**
* Model configuration parameters
@@ -170,6 +171,7 @@ export interface WorkflowRef {
features: WorkflowConfig['features'];
handleFeaturesConfig?: () => void;
handleSaveFeaturesConfig?: (value: FeaturesConfigForm) => void;
nodeClick: ({ node }: { node: Node }) => void;
}
/**

View File

@@ -94,7 +94,7 @@ const CreateDataset = () => {
const [processingMethod, setProcessingMethod] = useState<ProcessingMethod>('directBlock');
const [parameterSettings, setParameterSettings] = useState<ParameterSettings>('defaultSettings');
const [pdfEnhancementEnabled, setPdfEnhancementEnabled] = useState<boolean>(true);
const [pdfEnhancementMethod, setPdfEnhancementMethod] = useState<string>('deepdoc');
const [pdfEnhancementMethod, setPdfEnhancementMethod] = useState<string>('mineru');
const fileType = ['pdf', 'doc', 'docx', 'xls', 'xlsx', 'csv', 'md', 'htm', 'html', 'json', 'ppt', 'pptx', 'txt','png','jpg','mp3','mp4','mov','wav']
const steps = useMemo(
() => [

View File

@@ -106,6 +106,10 @@ const ModelList = forwardRef<BaseRef, { query: any; handleEdit: (vo?: ModelListI
/>
<ModelListDetail
ref={modelListDetailRef}
query={{
...query,
is_composite: false,
}}
refresh={getList}
handleEdit={handleEdit}
handleCloseConfig={handleCloseModel}

View File

@@ -32,12 +32,13 @@ interface ModelListDetailProps {
refresh?: () => void;
handleEdit: (vo?: ModelListItem) => void;
handleCloseConfig?: () => void;
query?: any;
}
/**
* Model list detail drawer component
*/
const ModelListDetail = forwardRef<ModelListDetailRef, ModelListDetailProps>(({ refresh, handleEdit, handleCloseConfig }, ref) => {
const ModelListDetail = forwardRef<ModelListDetailRef, ModelListDetailProps>(({ refresh, handleEdit, handleCloseConfig, query }, ref) => {
const { t } = useTranslation();
const [open, setOpen] = useState(false);
const [data, setData] = useState<ProviderModelItem>({} as ProviderModelItem)
@@ -58,7 +59,8 @@ const ModelListDetail = forwardRef<ModelListDetailRef, ModelListDetailProps>(({
if (!vo.provider) return
getModelNewList({
provider: vo.provider
provider: vo.provider,
...query,
})
.then(res => {
const response = res as ProviderModelItem[]

View File

@@ -101,13 +101,13 @@ const Inner: React.FC<{ getStatusTag: (status: string) => ReactNode; keyword?: s
<Flex gap={8} wrap align="center" className="rb:mt-2! rb:mb-4!">
<Flex gap={6}>
{InnerConfigData[item.config_data.tool_class].features?.slice(0, 2).map((type, i) => (
<div key={i} className="rb:bg-[#F6F6F6] rb:rounded-md rb:py-px rb:px-1 rb:text-[12px] rb:leading-4.5">{type}</div>
<div key={i} className="rb:bg-[#F6F6F6] rb:rounded-md rb:py-px rb:px-1 rb:text-[12px] rb:leading-4.5">{t(`tool.${type}`)}</div>
))}
</Flex>
{InnerConfigData[item.config_data.tool_class].features.length > 2 && (
<Tooltip
title={<Flex wrap gap={6}>{InnerConfigData[item.config_data.tool_class].features?.slice(2, InnerConfigData[item.config_data.tool_class].features.length).map((type, i) => (
<div key={i} className="rb:bg-[#F6F6F6] rb:rounded-md rb:py-px rb:px-1 rb:text-[12px] rb:leading-4.5 rb:text-[#171719]">{type}</div>
<div key={i} className="rb:bg-[#F6F6F6] rb:rounded-md rb:py-px rb:px-1 rb:text-[12px] rb:leading-4.5 rb:text-[#171719]">{t(`tool.${type}`)}</div>
))}</Flex>}
color="white"
placement="bottom"
@@ -135,7 +135,7 @@ const Inner: React.FC<{ getStatusTag: (status: string) => ReactNode; keyword?: s
{InnerConfigData[item.config_data.tool_class].eg}
</Col>
: <Col span={24}>
<div className="rb:text-[#5B6167] rb:mb-1">{t('configStatus')}</div>
<div className="rb:text-[#5B6167] rb:mb-1">{t('tool.configStatus')}</div>
{t(`tool.${item.status}_desc`)}
</Col>
}

View File

@@ -186,5 +186,43 @@ export const InnerConfigData: Record<string, InnerConfigItem> = {
'multilingualSupport',
'highPrecisionRecognition'
],
},
OpenClawTool: {
link: 'https://openclaw.ai/',
config: {
server_url: {
name: ['config', 'parameters', 'server_url'],
type: 'input',
desc: 'OpenClawTool_server_url_desc',
rules: [
{ required: true, message: 'common.pleaseEnter' }
]
},
api_key: {
name: ['config', 'parameters', 'api_key'],
type: 'input',
desc: 'OpenClawTool_api_key_desc',
rules: [
{ required: true, message: 'common.pleaseEnter' }
]
},
agent_id: {
name: ['config', 'parameters', 'agent_id'],
type: 'input',
desc: 'OpenClawTool_agent_id_desc',
defaultValue: 'main',
},
OpenClawTool_enable: {
name: ['config', 'is_enabled'],
type: 'checkbox',
defaultValue: true,
},
},
features: [
'3dPrinting',
'deviceManagement',
'multimodalInteraction',
'remoteAgent'
],
}
}

Some files were not shown because too many files have changed in this diff.