
Commit e7b4ea4

Authored by Wang-Daojiyuan (wangfridayL)
Feat/fix palyground bug (#655)
* fix playground bug, internet search judge
* fix playground internet bug
* modify delete mem
* modify tool resp bug in multi cube
* fix bug in playground chat handle and search inter
* modify prompt
* fix bug in playground
* fix bug playfround
* fix bug
* fix code
* fix model bug in playground
* modify plan b
* llm param modify
* add logger in playground
* modify code
* fix bug
* modify code
* modify code
* fix bug

---------

Co-authored-by: yuan.wang <[email protected]>
Co-authored-by: chunyu li <[email protected]>
1 parent 35b192f commit e7b4ea4

File tree

5 files changed: +71, -23 lines

src/memos/api/handlers/chat_handler.py
src/memos/mem_reader/read_multi_modal/system_parser.py
src/memos/memories/textual/tree.py
src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
src/memos/multi_mem_cube/single_cube.py

src/memos/api/handlers/chat_handler.py

Lines changed: 22 additions & 15 deletions

@@ -421,17 +421,21 @@ def generate_chat_response() -> Generator[str, None, None]:
         query=chat_req.query,
         user_id=chat_req.user_id,
         readable_cube_ids=readable_cube_ids,
-        mode=chat_req.mode,
+        mode="fast",
         internet_search=False,
-        top_k=chat_req.top_k,
+        top_k=5,
         chat_history=chat_req.history,
         session_id=chat_req.session_id,
-        include_preference=chat_req.include_preference,
+        include_preference=False,
         pref_top_k=chat_req.pref_top_k,
         filter=chat_req.filter,
+        search_tool_memory=False,
         playground_search_goal_parser=False,
     )
+    start_time = time.time()
     search_response = self.search_handler.handle_search_memories(search_req)
+    end_time = time.time()
+    self.logger.info(f"first search time: {end_time - start_time}")

     yield f"data: {json.dumps({'type': 'status', 'data': '1'})}\n\n"


@@ -447,18 +451,9 @@ def generate_chat_response() -> Generator[str, None, None]:

     # Prepare reference data (first search)
     reference = prepare_reference_data(filtered_memories)
-    # get preference string
-    pref_string = search_response.data.get("pref_string", "")

     yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n"

-    # Prepare preference markdown string
-    if chat_req.include_preference:
-        pref_list = search_response.data.get("pref_mem") or []
-        pref_memories = pref_list[0].get("memories", []) if pref_list else []
-        pref_md_string = self._build_pref_md_string_for_playground(pref_memories)
-        yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n"
-
     # parse goal for internet search
     searcher = self.dependencies.searcher
     parsed_goal = searcher.task_goal_parser.parse(

@@ -487,17 +482,22 @@ def generate_chat_response() -> Generator[str, None, None]:
         or chat_req.query + (f"{parsed_goal.tags}" if parsed_goal.tags else ""),
         user_id=chat_req.user_id,
         readable_cube_ids=readable_cube_ids,
-        mode=chat_req.mode,
+        mode="fast",
         internet_search=chat_req.internet_search,
         top_k=chat_req.top_k,
         chat_history=chat_req.history,
         session_id=chat_req.session_id,
-        include_preference=False,
+        include_preference=chat_req.include_preference,
+        pref_top_k=chat_req.pref_top_k,
         filter=chat_req.filter,
         search_memory_type="All",
+        search_tool_memory=False,
         playground_search_goal_parser=False,
     )
+    start_time = time.time()
     search_response = self.search_handler.handle_search_memories(search_req)
+    end_time = time.time()
+    self.logger.info(f"second search time: {end_time - start_time}")

     # Extract memories from search results (second search)
     memories_list = []

@@ -516,12 +516,19 @@ def generate_chat_response() -> Generator[str, None, None]:

     # Prepare remain reference data (second search)
     reference = prepare_reference_data(filtered_memories)
+    # get preference string
+    pref_string = search_response.data.get("pref_string", "")
     # get internet reference
     internet_reference = self._get_internet_reference(
         search_response.data.get("text_mem")[0]["memories"]
     )
-
     yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n"
+    # Prepare preference markdown string
+    if chat_req.include_preference:
+        pref_list = search_response.data.get("pref_mem") or []
+        pref_memories = pref_list[0].get("memories", []) if pref_list else []
+        pref_md_string = self._build_pref_md_string_for_playground(pref_memories)
+        yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n"

     # Step 2: Build system prompt with memories
     system_prompt = self._build_enhance_system_prompt(
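In short, the handler change pins the first playground search to mode="fast", top_k=5, and include_preference=False, logs wall-clock time around both handle_search_memories calls, and streams the preference markdown only after the second search. The sketch below restates the two small patterns the diff relies on (timing a search call and formatting an SSE frame); timed_search and sse_event are illustrative helpers, not functions in chat_handler.py.

```python
import json
import logging
import time
from typing import Any, Callable

logger = logging.getLogger("playground.chat")


def timed_search(label: str, search_fn: Callable[[], Any]) -> Any:
    """Run a search callable and log its duration, mirroring the
    start_time/end_time bookkeeping added around both search calls."""
    start_time = time.time()
    response = search_fn()
    end_time = time.time()
    logger.info(f"{label} search time: {end_time - start_time}")
    return response


def sse_event(event_type: str, data: Any) -> str:
    """Format one server-sent-events frame, matching the strings yielded
    by generate_chat_response."""
    return f"data: {json.dumps({'type': event_type, 'data': data})}\n\n"


# Usage sketch (search_handler and search_req are assumed to exist):
# response = timed_search("first", lambda: search_handler.handle_search_memories(search_req))
# frame = sse_event("status", "1")
```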

src/memos/mem_reader/read_multi_modal/system_parser.py

Lines changed: 9 additions & 2 deletions

@@ -1,5 +1,6 @@
 """Parser for system messages."""

+import ast
 import json
 import re
 import uuid

@@ -137,8 +138,14 @@ def parse_fine(
         tool_schema = json.loads(content)
         assert isinstance(tool_schema, list), "Tool schema must be a list[dict]"
     except json.JSONDecodeError:
-        logger.warning(f"[SystemParser] Failed to parse tool schema: {content}")
-        return []
+        try:
+            tool_schema = ast.literal_eval(content)
+            assert isinstance(tool_schema, list), "Tool schema must be a list[dict]"
+        except (ValueError, SyntaxError, AssertionError):
+            logger.warning(
+                f"[SystemParser] Failed to parse tool schema with both JSON and ast.literal_eval: {content}"
+            )
+            return []
     except AssertionError:
         logger.warning(f"[SystemParser] Tool schema must be a list[dict]: {content}")
         return []
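The parser change adds a fallback: if the tool schema string is not strict JSON (for example, single-quoted Python-repr output), it is retried with ast.literal_eval before giving up. A self-contained restatement of that fallback, using only the standard library (the function and logger names here are illustrative, not taken from the module):

```python
import ast
import json
import logging

logger = logging.getLogger("SystemParser")


def parse_tool_schema(content: str) -> list:
    """Parse a tool schema serialized either as JSON or as a Python literal."""
    try:
        tool_schema = json.loads(content)
        assert isinstance(tool_schema, list), "Tool schema must be a list[dict]"
        return tool_schema
    except json.JSONDecodeError:
        try:
            # Fallback for Python-repr strings (single quotes, True/False/None).
            tool_schema = ast.literal_eval(content)
            assert isinstance(tool_schema, list), "Tool schema must be a list[dict]"
            return tool_schema
        except (ValueError, SyntaxError, AssertionError):
            logger.warning(
                f"[SystemParser] Failed to parse tool schema with both JSON and ast.literal_eval: {content}"
            )
            return []
    except AssertionError:
        logger.warning(f"[SystemParser] Tool schema must be a list[dict]: {content}")
        return []


# parse_tool_schema("[{'name': 'search', 'parameters': {}}]") now succeeds via
# ast.literal_eval even though json.loads rejects single-quoted keys.
```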

src/memos/memories/textual/tree.py

Lines changed: 11 additions & 0 deletions

@@ -343,6 +343,17 @@ def delete_all(self) -> None:
             logger.error(f"An error occurred while deleting all memories: {e}")
             raise

+    def delete_by_filter(
+        self,
+        writable_cube_ids: list[str],
+        file_ids: list[str] | None = None,
+        filter: dict | None = None,
+    ) -> None:
+        """Delete memories by filter."""
+        self.graph_store.delete_node_by_prams(
+            writable_cube_ids=writable_cube_ids, file_ids=file_ids, filter=filter
+        )
+
     def load(self, dir: str) -> None:
         try:
             memory_file = os.path.join(dir, self.config.memory_filename)
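The new delete_by_filter method simply forwards its keyword arguments to graph_store.delete_node_by_prams (the delegate name as written in the diff). A hypothetical call site might look like the sketch below; the memory instance, ids, and filter key are placeholders, not values from this repository.

```python
def purge_file_memories(memory, cube_id: str, file_id: str) -> None:
    """Hypothetical helper around the new delete_by_filter method; `memory`
    is assumed to be the tree textual memory instance from tree.py."""
    memory.delete_by_filter(
        writable_cube_ids=[cube_id],
        file_ids=[file_id],             # optional: restrict deletion to specific files
        filter={"user_id": "user_42"},  # optional: extra metadata filter
    )
```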

src/memos/memories/textual/tree_text_memory/retrieve/searcher.py

Lines changed: 25 additions & 5 deletions

@@ -701,15 +701,35 @@ def _sort_and_trim(
         """Sort results by score and trim to top_k"""
         final_items = []
         if search_tool_memory:
-            tool_results = [
+            tool_schema_results = [
                 (item, score)
                 for item, score in results
-                if item.metadata.memory_type in ["ToolSchemaMemory", "ToolTrajectoryMemory"]
+                if item.metadata.memory_type == "ToolSchemaMemory"
             ]
-            sorted_tool_results = sorted(tool_results, key=lambda pair: pair[1], reverse=True)[
-                :tool_mem_top_k
+            sorted_tool_schema_results = sorted(
+                tool_schema_results, key=lambda pair: pair[1], reverse=True
+            )[:tool_mem_top_k]
+            for item, score in sorted_tool_schema_results:
+                if plugin and round(score, 2) == 0.00:
+                    continue
+                meta_data = item.metadata.model_dump()
+                meta_data["relativity"] = score
+                final_items.append(
+                    TextualMemoryItem(
+                        id=item.id,
+                        memory=item.memory,
+                        metadata=SearchedTreeNodeTextualMemoryMetadata(**meta_data),
+                    )
+                )
+            tool_trajectory_results = [
+                (item, score)
+                for item, score in results
+                if item.metadata.memory_type == "ToolTrajectoryMemory"
             ]
-            for item, score in sorted_tool_results:
+            sorted_tool_trajectory_results = sorted(
+                tool_trajectory_results, key=lambda pair: pair[1], reverse=True
+            )[:tool_mem_top_k]
+            for item, score in sorted_tool_trajectory_results:
                 if plugin and round(score, 2) == 0.00:
                     continue
                 meta_data = item.metadata.model_dump()
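With this change, _sort_and_trim ranks ToolSchemaMemory and ToolTrajectoryMemory results in separate buckets, each trimmed to tool_mem_top_k, instead of mixing both types in one list. The standalone helper below sketches that per-type top-k pattern under the same (item, score) convention; it is not the method itself and omits the TextualMemoryItem re-wrapping.

```python
from collections import defaultdict


def top_k_per_type(results, memory_types, k, plugin=False):
    """Keep the top-k scored items of each requested memory_type, skipping
    zero-score items in plugin mode, matching the per-bucket trimming the
    patched _sort_and_trim applies to schema and trajectory memories.
    `results` is a list of (item, score) pairs and item.metadata.memory_type
    names the bucket."""
    buckets = defaultdict(list)
    for item, score in results:
        if item.metadata.memory_type in memory_types:
            buckets[item.metadata.memory_type].append((item, score))
    kept = []
    for mem_type in memory_types:
        ranked = sorted(buckets[mem_type], key=lambda pair: pair[1], reverse=True)[:k]
        for item, score in ranked:
            # Mirror the diff's guard: drop items whose score rounds to 0.00 in plugin mode.
            if plugin and round(score, 2) == 0.00:
                continue
            kept.append((item, score))
    return kept


# Example: top_k_per_type(results, ["ToolSchemaMemory", "ToolTrajectoryMemory"], tool_mem_top_k)
```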

src/memos/multi_mem_cube/single_cube.py

Lines changed: 4 additions & 1 deletion

@@ -30,6 +30,7 @@
     SearchMode,
     UserContext,
 )
+from memos.utils import timed


 logger = get_logger(__name__)

@@ -198,6 +199,7 @@ def _get_search_mode(self, mode: str) -> str:
         """
         return mode

+    @timed
     def _search_text(
         self,
         search_req: APISearchRequest,

@@ -363,6 +365,7 @@ def _fine_search(

         return formatted_memories

+    @timed
     def _search_pref(
         self,
         search_req: APISearchRequest,

@@ -429,7 +432,7 @@ def _fast_search(
             top_k=search_req.top_k,
             mode=SearchMode.FAST,
             manual_close_internet=not search_req.internet_search,
-            momory_type=search_req.search_memory_type,
+            memory_type=search_req.search_memory_type,
             search_filter=search_filter,
             search_priority=search_priority,
             info={
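This file decorates _search_text and _search_pref with memos.utils.timed and fixes the momory_type typo in the fast-search keyword arguments. The timed implementation itself is not part of this diff; the following is only a minimal sketch of what such a decorator typically looks like, an assumption rather than the library's actual code.

```python
import functools
import logging
import time

logger = logging.getLogger(__name__)


def timed(func):
    """Minimal sketch of a timing decorator; the real memos.utils.timed may
    log differently, but the usage pattern (@timed above _search_text and
    _search_pref) is the same."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            # Log the wall-clock duration even if the wrapped call raises.
            logger.info(f"{func.__name__} took {time.time() - start:.3f}s")
    return wrapper
```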
