mirror of
https://github.com/zhayujie/chatgpt-on-wechat.git
synced 2026-03-20 13:58:30 +08:00
- Use LLM to summarize discarded context into concise daily memory entries
- Batch trim to half when exceeding max_turns/max_tokens, reducing flush frequency
- Run summarization asynchronously in a background thread, no blocking on replies
- Add daily scheduled flush (23:55) as a fallback for low-activity days
- Sync trimmed messages back to the agent to keep context state consistent
24 lines
745 B
Python
24 lines
745 B
Python
"""Memory module for AgentMesh.

Provides both long-term memory (vector/keyword search) and short-term
conversation history persistence (SQLite).

Public API is re-exported here so callers can simply do
``from agent.memory import MemoryManager`` etc.; see ``__all__`` below.
"""

# Long-term memory orchestration (vector/keyword search).
from agent.memory.manager import MemoryManager
# Configuration object plus module-level default/global config accessors.
from agent.memory.config import MemoryConfig, get_default_memory_config, set_global_memory_config
# Factory for the embedding backend used by vector search.
from agent.memory.embedding import create_embedding_provider
# Short-term conversation history persistence (SQLite-backed store).
from agent.memory.conversation_store import ConversationStore, get_conversation_store
# Daily-memory summarization helper (ensures the per-day memory file exists).
from agent.memory.summarizer import ensure_daily_memory_file

# Explicit public surface of the package.
__all__ = [
    'MemoryManager',
    'MemoryConfig',
    'get_default_memory_config',
    'set_global_memory_config',
    'create_embedding_provider',
    'ConversationStore',
    'get_conversation_store',
    'ensure_daily_memory_file',
]