Update README.MD and add nano-claude-code v3.0 + original-source-code/src

- README.MD: add original-source-code and nano-claude-code sections, update
  overview table (4 subprojects), add v3.0 news entry, expand comparison table
  with memory/multi-agent/skills dimensions
- nano-claude-code v3.0: multi-agent package (multi_agent/), memory package
  (memory/), skill package (skill/) with built-in /commit and /review skills,
  context compression (compaction.py), tool registry plugin system, diff view,
  17 slash commands, 18 built-in tools, 101 tests (~5000 lines total)
- original-source-code/src: add raw TypeScript source tree (1884 files)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
chauncygu
2026-04-03 10:26:29 -07:00
parent 3de4c595ea
commit 1d4ffa964d
1942 changed files with 521644 additions and 112 deletions

View File

@@ -0,0 +1,86 @@
"""Memory package for nano-claude-code.
Provides persistent, file-based memory across conversations.
Storage layout:
user scope : ~/.nano_claude/memory/<slug>.md (shared across projects)
project scope : .nano_claude/memory/<slug>.md (local to cwd)
The MEMORY.md index in each directory is auto-maintained and injected
into the system prompt so Claude has an overview of available memories.
Public API (backward-compatible with the old memory.py module):
MemoryEntry — dataclass for a single memory
save_memory() — write/update a memory file
delete_memory() — remove a memory file
load_index() — load all entries from one or both scopes
search_memory() — keyword search across entries
get_memory_context() — MEMORY.md content for system prompt injection
"""
from .store import ( # noqa: F401
MemoryEntry,
save_memory,
delete_memory,
load_index,
load_entries,
search_memory,
get_index_content,
parse_frontmatter,
USER_MEMORY_DIR,
INDEX_FILENAME,
MAX_INDEX_LINES,
MAX_INDEX_BYTES,
)
from .scan import ( # noqa: F401
MemoryHeader,
scan_memory_dir,
scan_all_memories,
format_memory_manifest,
memory_age_days,
memory_age_str,
memory_freshness_text,
)
from .context import ( # noqa: F401
get_memory_context,
find_relevant_memories,
truncate_index_content,
)
from .types import ( # noqa: F401
MEMORY_TYPES,
MEMORY_TYPE_DESCRIPTIONS,
MEMORY_SYSTEM_PROMPT,
WHAT_NOT_TO_SAVE,
)
# Public API of the memory package, grouped by the submodule that owns each
# name. Keep in sync with the re-exports above.
__all__ = [
    # store — persistence primitives and path/limit constants
    "MemoryEntry",
    "save_memory",
    "delete_memory",
    "load_index",
    "load_entries",
    "search_memory",
    "get_index_content",
    "parse_frontmatter",
    "USER_MEMORY_DIR",
    "INDEX_FILENAME",
    "MAX_INDEX_LINES",
    "MAX_INDEX_BYTES",
    # scan — lightweight frontmatter scanning and age/freshness helpers
    "MemoryHeader",
    "scan_memory_dir",
    "scan_all_memories",
    "format_memory_manifest",
    "memory_age_days",
    "memory_age_str",
    "memory_freshness_text",
    # context — system-prompt injection and relevance filtering
    "get_memory_context",
    "find_relevant_memories",
    "truncate_index_content",
    # types — taxonomy constants and guidance text
    "MEMORY_TYPES",
    "MEMORY_TYPE_DESCRIPTIONS",
    "MEMORY_SYSTEM_PROMPT",
    "WHAT_NOT_TO_SAVE",
]

View File

@@ -0,0 +1,221 @@
"""Memory context building for system prompt injection.
Provides:
get_memory_context() — full context string for system prompt
find_relevant_memories() — keyword (+ optional AI) relevance filtering
truncate_index_content() — line + byte truncation with warning
"""
from __future__ import annotations
from pathlib import Path
from .store import (
USER_MEMORY_DIR,
INDEX_FILENAME,
MAX_INDEX_LINES,
MAX_INDEX_BYTES,
get_memory_dir,
get_index_content,
load_entries,
search_memory,
)
from .scan import scan_all_memories, format_memory_manifest, memory_freshness_text
from .types import MEMORY_SYSTEM_PROMPT
# ── Index truncation ───────────────────────────────────────────────────────
def truncate_index_content(raw: str) -> str:
    """Truncate MEMORY.md content to line AND byte limits, appending a warning.

    Matches Claude Code's truncateEntrypointContent:
      - Line-truncates first (natural boundary)
      - Then byte-truncates at the last newline before the cap
      - Appends which limit fired

    Args:
        raw: full MEMORY.md text; leading/trailing whitespace is stripped.

    Returns:
        The (possibly truncated) content. When either limit fired, a
        blockquote WARNING naming the offending limit(s) is appended.
    """
    trimmed = raw.strip()
    content_lines = trimmed.split("\n")
    line_count = len(content_lines)
    byte_count = len(trimmed.encode())
    was_line_truncated = line_count > MAX_INDEX_LINES
    was_byte_truncated = byte_count > MAX_INDEX_BYTES
    if not was_line_truncated and not was_byte_truncated:
        # Within both limits — return unchanged (modulo strip()).
        return trimmed
    # Line truncation first: whole lines are the natural boundary.
    truncated = "\n".join(content_lines[:MAX_INDEX_LINES]) if was_line_truncated else trimmed
    if len(truncated.encode()) > MAX_INDEX_BYTES:
        # Cut at last newline before byte limit; decode with errors="replace"
        # in case the fallback cut lands inside a multi-byte character.
        raw_bytes = truncated.encode()
        cut = raw_bytes[:MAX_INDEX_BYTES].rfind(b"\n")
        truncated = raw_bytes[: cut if cut > 0 else MAX_INDEX_BYTES].decode(errors="replace")
    # Reason reports which limit(s) fired, measured on the ORIGINAL content.
    if was_byte_truncated and not was_line_truncated:
        reason = f"{byte_count:,} bytes (limit: {MAX_INDEX_BYTES:,}) — index entries are too long"
    elif was_line_truncated and not was_byte_truncated:
        reason = f"{line_count} lines (limit: {MAX_INDEX_LINES})"
    else:
        reason = f"{line_count} lines and {byte_count:,} bytes"
    warning = (
        f"\n\n> WARNING: {INDEX_FILENAME} is {reason}. "
        "Only part of it was loaded. Keep index entries to one line under ~150 chars."
    )
    return truncated + warning
# ── System prompt context ──────────────────────────────────────────────────
def get_memory_context(include_guidance: bool = False) -> str:
    """Build the memory context string injected into the system prompt.

    Combines the user-level and project-level MEMORY.md indexes (each
    truncated to the configured limits); project content is labelled with
    a "[Project memories]" header. Returns '' when neither index exists.

    Args:
        include_guidance: when True, prepend the full memory system guidance
            (MEMORY_SYSTEM_PROMPT). Normally False since the system prompt
            template already includes brief guidance.
    """
    sections: list[str] = []

    # (scope, label) pairs; only project-level content gets a label.
    for scope, label in (("user", None), ("project", "[Project memories]")):
        raw = get_index_content(scope)
        if not raw:
            continue
        section = truncate_index_content(raw)
        if label:
            section = f"{label}\n{section}"
        sections.append(section)

    if not sections:
        return ""

    body = "\n\n".join(sections)
    return f"{MEMORY_SYSTEM_PROMPT}\n\n## MEMORY.md\n{body}" if include_guidance else body
# ── Relevant memory finder ─────────────────────────────────────────────────
def find_relevant_memories(
    query: str,
    max_results: int = 5,
    use_ai: bool = False,
    config: dict | None = None,
    scope: str = "all",
) -> list[dict]:
    """Find memories relevant to a query.

    Strategy:
      1. Always: keyword match on name + description + content
      2. If use_ai=True and config has a model: use a small AI call to rank

    Args:
        query: free-text search query.
        max_results: maximum number of memories to return.
        use_ai: rank candidates with a lightweight AI call when True.
        config: provider configuration (required for the AI path).
        scope: "user", "project", or "all" — forwarded to search_memory()
            (new, backward-compatible; previous behavior == "all").

    Returns:
        List of dicts with keys: name, description, type, scope, content,
        file_path, mtime_s, freshness_text. Keyword path is newest-first.
    """
    # Step 1: keyword filter.
    keyword_results = search_memory(query, scope=scope)
    if not keyword_results:
        return []
    if not use_ai or not config:
        # scan_all_memories is already imported at module level; the mtime map
        # lets us order matches by file recency.
        headers = scan_all_memories()
        path_to_mtime = {h.file_path: h.mtime_s for h in headers}
        results = []
        for entry in keyword_results:
            mtime_s = path_to_mtime.get(entry.file_path, 0)
            results.append({
                "name": entry.name,
                "description": entry.description,
                "type": entry.type,
                "scope": entry.scope,
                "content": entry.content,
                "file_path": entry.file_path,
                "mtime_s": mtime_s,
                "freshness_text": memory_freshness_text(mtime_s),
            })
        # BUG FIX: sort ALL matches by recency before slicing. The previous
        # code sliced keyword_results first, so "top by recency" was drawn
        # from an arbitrary (scope/alphabetical) prefix of the matches.
        results.sort(key=lambda r: r["mtime_s"], reverse=True)
        return results[:max_results]
    # Step 2: AI-powered relevance selection (optional, lightweight).
    return _ai_select_memories(query, keyword_results, max_results, config)
def _ai_select_memories(
    query: str,
    candidates: list,
    max_results: int,
    config: dict,
) -> list[dict]:
    """Use a fast AI call to select the most relevant memories from candidates.

    Falls back to the first max_results keyword candidates on any error.

    Args:
        query: the user's search query.
        candidates: MemoryEntry objects from the keyword filter.
        max_results: cap on the number of selected memories.
        config: provider configuration (model name, etc.).
    """
    # BUG FIX: resolve mtimes up front so BOTH the AI path and the fallback
    # can use them. Previously this lived inside the try block and the
    # result-building loop probed for it with a fragile `"path_to_mtime" in
    # dir()` check, silently reporting mtime 0 whenever the AI call failed.
    path_to_mtime: dict[str, float] = {}
    try:
        path_to_mtime = {h.file_path: h.mtime_s for h in scan_all_memories()}
    except Exception:
        pass  # best-effort: missing mtimes only degrade freshness reporting
    try:
        from providers import stream, AssistantTurn
        # Numbered manifest of the candidates only.
        manifest_lines = []
        for i, e in enumerate(candidates):
            # BUG FIX: separate name from description — they were previously
            # concatenated with no delimiter ("noteDon't mock DB in tests").
            manifest_lines.append(f"{i}: [{e.type}] {e.name} — {e.description}")
        manifest = "\n".join(manifest_lines)
        system = (
            "You select memories relevant to a query. "
            "Return a JSON object with key 'indices' containing a list of integer indices "
            f"(0-based) from the provided list. Select at most {max_results} entries. "
            "Only include indices clearly relevant to the query. Return {\"indices\": []} if none."
        )
        messages = [{"role": "user", "content": f"Query: {query}\n\nMemories:\n{manifest}"}]
        result_text = ""
        for event in stream(
            model=config.get("model", "claude-haiku-4-5-20251001"),
            system=system,
            messages=messages,
            tool_schemas=[],
            config={**config, "max_tokens": 256, "no_tools": True},
        ):
            if isinstance(event, AssistantTurn):
                result_text = event.text
                break
        import json as _json
        parsed = _json.loads(result_text)
        selected_indices = [int(i) for i in parsed.get("indices", []) if isinstance(i, int)]
    except Exception:
        # Any failure (provider import, network, malformed JSON) falls back
        # to the leading keyword matches.
        selected_indices = list(range(min(max_results, len(candidates))))
    results = []
    for i in selected_indices[:max_results]:
        if i < 0 or i >= len(candidates):
            continue  # ignore out-of-range indices returned by the model
        entry = candidates[i]
        mtime_s = path_to_mtime.get(entry.file_path, 0)
        results.append({
            "name": entry.name,
            "description": entry.description,
            "type": entry.type,
            "scope": entry.scope,
            "content": entry.content,
            "file_path": entry.file_path,
            "mtime_s": mtime_s,
            "freshness_text": memory_freshness_text(mtime_s),
        })
    return results

View File

@@ -0,0 +1,144 @@
"""Memory file scanning with mtime tracking and freshness/age helpers.
Mirrors the key ideas from Claude Code's memoryScan.ts and memoryAge.ts:
- Scan memory directories, sort newest-first
- Format a manifest for display or AI relevance selection
- Report memory age in human-readable form ("today", "3 days ago")
- Emit a staleness caveat for memories older than 1 day
"""
from __future__ import annotations
import math
import time
from dataclasses import dataclass
from pathlib import Path
from .store import get_memory_dir, parse_frontmatter, INDEX_FILENAME
MAX_MEMORY_FILES = 200
# ── Data model ─────────────────────────────────────────────────────────────
@dataclass
class MemoryHeader:
    """Lightweight descriptor loaded from a memory file's frontmatter.

    Populated by scan_memory_dir() from the leading frontmatter lines, so
    listing memories stays cheap even with many files on disk.

    Attributes:
        filename: basename of the .md file
        file_path: absolute path
        mtime_s: modification time (seconds since epoch)
        description: value from frontmatter `description:` field
        type: value from frontmatter `type:` field
        scope: "user" or "project"
    """

    filename: str
    file_path: str
    mtime_s: float
    description: str
    type: str
    scope: str
# ── Scanning ───────────────────────────────────────────────────────────────
def scan_memory_dir(mem_dir: Path, scope: str) -> list[MemoryHeader]:
    """Scan a single memory directory and return headers sorted newest-first.

    Reads only the first ~30 lines of each file (enough for the frontmatter)
    for efficiency. Silently skips unreadable files. Caps at
    MAX_MEMORY_FILES entries.

    Args:
        mem_dir: directory containing *.md memory files.
        scope: "user" or "project" — recorded on each returned header.

    Returns:
        MemoryHeader list sorted by mtime, newest first.
    """
    if not mem_dir.is_dir():
        return []
    headers: list[MemoryHeader] = []
    for fp in mem_dir.glob("*.md"):
        if fp.name == INDEX_FILENAME:
            continue  # the index itself is not a memory
        try:
            stat = fp.stat()
            # PERF FIX: stream only the first 30 lines. The old code did a
            # full read_text() and then sliced — the docstring promised a
            # head-only read that the implementation didn't deliver.
            head: list[str] = []
            with fp.open(errors="replace") as fh:
                for _, line in zip(range(30), fh):
                    head.append(line.rstrip("\n"))
            snippet = "\n".join(head)
            meta, _ = parse_frontmatter(snippet)
            headers.append(MemoryHeader(
                filename=fp.name,
                file_path=str(fp),
                mtime_s=stat.st_mtime,
                description=meta.get("description", ""),
                type=meta.get("type", ""),
                scope=scope,
            ))
        except Exception:
            continue  # unreadable/corrupt file: skip rather than fail the scan
    headers.sort(key=lambda h: h.mtime_s, reverse=True)
    return headers[:MAX_MEMORY_FILES]
def scan_all_memories() -> list[MemoryHeader]:
    """Scan the user and project memory directories, merged newest-first."""
    merged: list[MemoryHeader] = []
    for scope in ("user", "project"):
        merged.extend(scan_memory_dir(get_memory_dir(scope), scope))
    merged.sort(key=lambda h: h.mtime_s, reverse=True)
    return merged[:MAX_MEMORY_FILES]
# ── Age / freshness ────────────────────────────────────────────────────────
def memory_age_days(mtime_s: float) -> int:
    """Return whole days elapsed since mtime_s, clamped to 0 for future times."""
    elapsed_days = math.floor((time.time() - mtime_s) / 86_400)
    return elapsed_days if elapsed_days > 0 else 0
def memory_age_str(mtime_s: float) -> str:
    """Human-readable age: 'today', 'yesterday', or 'N days ago'."""
    days = memory_age_days(mtime_s)
    special = {0: "today", 1: "yesterday"}
    return special.get(days, f"{days} days ago")
def memory_freshness_text(mtime_s: float) -> str:
    """Return a staleness caveat for memories older than 1 day ('' if fresh).

    Motivated by user reports of stale code-state memories (file:line
    citations to code that has since changed) being asserted as fact.
    """
    age = memory_age_days(mtime_s)
    if age > 1:
        return (
            f"This memory is {age} days old. "
            "Memories are point-in-time observations, not live state — "
            "claims about code behavior or file:line citations may be outdated. "
            "Verify against current code before asserting as fact."
        )
    return ""
# ── Manifest formatting ────────────────────────────────────────────────────
def format_memory_manifest(headers: list[MemoryHeader]) -> str:
    """Render MemoryHeader objects as a one-line-per-memory text manifest.

    Format per line: [type/scope] filename (age): description
    Example:
        [feedback/user] feedback_testing.md (3 days ago): Don't mock DB in tests
        [project/project] project_freeze.md (today): Merge freeze until 2026-04-10
    """
    rendered: list[str] = []
    for header in headers:
        # Omit the type segment when the frontmatter had no `type:` field.
        tag = f"[{header.type}/{header.scope}]" if header.type else f"[{header.scope}]"
        line = f"- {tag} {header.filename} ({memory_age_str(header.mtime_s)})"
        if header.description:
            line += f": {header.description}"
        rendered.append(line)
    return "\n".join(rendered)

View File

@@ -0,0 +1,223 @@
"""File-based memory storage with user-level and project-level scopes.
Storage layout:
user scope : ~/.nano_claude/memory/<slug>.md
project scope : .nano_claude/memory/<slug>.md (relative to cwd)
MEMORY.md in each directory is the index file — rebuilt automatically after
every save/delete. It is loaded into the system prompt to give Claude an
overview of available memories.
"""
from __future__ import annotations
import re
from dataclasses import dataclass
from pathlib import Path
# ── Paths ──────────────────────────────────────────────────────────────────
# Global (user-scope) memory directory, shared across all projects.
USER_MEMORY_DIR = Path.home() / ".nano_claude" / "memory"
# Auto-maintained index file kept in each memory directory.
INDEX_FILENAME = "MEMORY.md"
# Maximum lines/bytes for the index file (mirrors Claude Code limits)
MAX_INDEX_LINES = 200
MAX_INDEX_BYTES = 25_000
def get_project_memory_dir() -> Path:
    """Return the project-local memory directory (.nano_claude/memory under cwd)."""
    return Path.cwd().joinpath(".nano_claude", "memory")
def get_memory_dir(scope: str = "user") -> Path:
    """Resolve the memory directory for a scope.

    Args:
        scope: "user" (global ~/.nano_claude/memory) or
               "project" (.nano_claude/memory relative to cwd)
    """
    return get_project_memory_dir() if scope == "project" else USER_MEMORY_DIR
# ── Data model ─────────────────────────────────────────────────────────────
@dataclass
class MemoryEntry:
    """A single memory entry loaded from a .md file.

    Attributes:
        name: human-readable name (also the display title in the index)
        description: short one-line description (used for relevance decisions)
        type: "user" | "feedback" | "project" | "reference"
        content: body text of the memory
        file_path: absolute path to the .md file on disk ("" until saved)
        created: date string, e.g. "2026-04-02"
        scope: "user" | "project" — which directory this was loaded from
    """

    name: str
    description: str
    type: str
    content: str
    # The three fields below are filled in by save_memory()/load_entries().
    file_path: str = ""
    created: str = ""
    scope: str = "user"
# ── Helpers ────────────────────────────────────────────────────────────────
def _slugify(name: str) -> str:
    """Convert name to a filesystem-safe slug (lowercase, max 60 chars)."""
    candidate = name.lower().strip().replace(" ", "_")
    cleaned = re.sub(r"[^a-z0-9_]", "", candidate)
    return cleaned[:60]
def parse_frontmatter(text: str) -> tuple[dict, str]:
    """Split ---\\nkey: value\\n---\\nbody text into metadata and body.

    Returns:
        (meta_dict, body_str); ({}, text) when no frontmatter is present.
    """
    if not text.startswith("---"):
        return {}, text
    pieces = text.split("---", 2)
    if len(pieces) < 3:
        return {}, text  # opening marker without a closing one
    _, header, body = pieces
    meta: dict = {}
    for raw in header.strip().splitlines():
        if ":" not in raw:
            continue
        key, _sep, value = raw.partition(":")
        meta[key.strip()] = value.strip()
    return meta, body.strip()
def _format_entry_md(entry: MemoryEntry) -> str:
    """Render a MemoryEntry as a markdown file with YAML frontmatter."""
    lines = [
        "---",
        f"name: {entry.name}",
        f"description: {entry.description}",
        f"type: {entry.type}",
        f"created: {entry.created}",
        "---",
        f"{entry.content}",
    ]
    return "\n".join(lines) + "\n"
# ── Core storage operations ────────────────────────────────────────────────
def save_memory(entry: MemoryEntry, scope: str = "user") -> None:
    """Persist a memory entry to disk and rebuild that scope's index.

    A memory with the same name (slug) is overwritten in place.

    Args:
        entry: MemoryEntry to persist (its file_path/scope are updated)
        scope: "user" or "project"
    """
    target_dir = get_memory_dir(scope)
    target_dir.mkdir(parents=True, exist_ok=True)
    target = target_dir / (_slugify(entry.name) + ".md")
    target.write_text(_format_entry_md(entry))
    # Record where the entry landed so callers can reference it.
    entry.file_path = str(target)
    entry.scope = scope
    _rewrite_index(scope)
def delete_memory(name: str, scope: str = "user") -> None:
    """Remove the memory file matching name and rebuild the index.

    No error if the memory does not exist.
    """
    target = get_memory_dir(scope) / f"{_slugify(name)}.md"
    if target.exists():
        target.unlink()
        _rewrite_index(scope)
def load_entries(scope: str = "user") -> list[MemoryEntry]:
    """Load every memory file (except MEMORY.md) in a scope.

    Returns:
        List of MemoryEntry, in sorted filename order.
    """
    source_dir = get_memory_dir(scope)
    if not source_dir.exists():
        return []
    loaded: list[MemoryEntry] = []
    for path in sorted(source_dir.glob("*.md")):
        if path.name == INDEX_FILENAME:
            continue  # the auto-generated index is not a memory
        try:
            raw = path.read_text()
        except Exception:
            continue  # unreadable file: skip silently
        meta, body = parse_frontmatter(raw)
        loaded.append(MemoryEntry(
            name=meta.get("name", path.stem),
            description=meta.get("description", ""),
            type=meta.get("type", "user"),
            content=body,
            file_path=str(path),
            created=meta.get("created", ""),
            scope=scope,
        ))
    return loaded
def load_index(scope: str = "all") -> list[MemoryEntry]:
    """Load memory entries from one scope, or both when scope == "all".

    Args:
        scope: "user", "project", or "all"

    Returns:
        List of MemoryEntry (user entries first, then project for "all").
    """
    if scope != "all":
        return load_entries(scope)
    return [*load_entries("user"), *load_entries("project")]
def search_memory(query: str, scope: str = "all") -> list[MemoryEntry]:
    """Case-insensitive substring match over name + description + content.

    Returns:
        List of matching MemoryEntry objects.
    """
    needle = query.lower()
    return [
        entry
        for entry in load_index(scope)
        if needle in f"{entry.name} {entry.description} {entry.content}".lower()
    ]
def _rewrite_index(scope: str) -> None:
    """Regenerate MEMORY.md for a scope from the .md files currently on disk."""
    mem_dir = get_memory_dir(scope)
    if not mem_dir.exists():
        return
    entry_lines = [
        f"- [{e.name}]({Path(e.file_path).name}) — {e.description}"
        for e in load_entries(scope)
    ]
    text = "\n".join(entry_lines)
    if entry_lines:
        text += "\n"  # trailing newline only when the index is non-empty
    (mem_dir / INDEX_FILENAME).write_text(text)
def get_index_content(scope: str = "user") -> str:
    """Return stripped MEMORY.md content for the scope, or '' when absent."""
    index_path = get_memory_dir(scope) / INDEX_FILENAME
    if not index_path.exists():
        return ""
    return index_path.read_text().strip()

View File

@@ -0,0 +1,216 @@
"""Memory tool registrations: MemorySave, MemoryDelete, MemorySearch.
Importing this module registers the three tools into the central registry.
"""
from __future__ import annotations
from datetime import datetime
from tool_registry import ToolDef, register_tool
from .store import MemoryEntry, save_memory, delete_memory, load_index
from .context import find_relevant_memories
from .scan import scan_all_memories, format_memory_manifest
# ── Tool implementations ───────────────────────────────────────────────────
def _memory_save(params: dict, config: dict) -> str:
    """Persist a memory entry described by the tool parameters."""
    today = datetime.now().strftime("%Y-%m-%d")
    entry = MemoryEntry(
        name=params["name"],
        description=params["description"],
        type=params["type"],
        content=params["content"],
        created=today,
    )
    scope = params.get("scope", "user")
    save_memory(entry, scope=scope)
    label = "project" if scope == "project" else "user"
    return f"Memory saved: '{entry.name}' [{entry.type}/{label}]"
def _memory_delete(params: dict, config: dict) -> str:
    """Remove a persistent memory entry by its name."""
    target_name = params["name"]
    target_scope = params.get("scope", "user")
    delete_memory(target_name, scope=target_scope)
    return f"Memory deleted: '{target_name}' (scope: {target_scope})"
def _memory_search(params: dict, config: dict) -> str:
    """Search memories by keyword query with optional AI relevance filtering.

    Returns a human-readable report: one section per match with type/scope,
    name, description, a 200-char content preview, and a staleness caveat
    for memories older than one day.
    """
    query = params["query"]
    use_ai = params.get("use_ai", False)
    max_results = params.get("max_results", 5)
    # NOTE(review): the MemorySearch schema declares a "scope" parameter, but
    # it is not honored here — find_relevant_memories searches all scopes.
    results = find_relevant_memories(
        query, max_results=max_results, use_ai=use_ai, config=config
    )
    if not results:
        return f"No memories found matching '{query}'."
    lines = [f"Found {len(results)} relevant memory/memories for '{query}':", ""]
    for r in results:
        preview = f"{r['content'][:200]}{'...' if len(r['content']) > 200 else ''}"
        # BUG FIX: put the staleness caveat on its own indented line — it was
        # previously concatenated directly onto the end of the preview text.
        freshness = f"\n  {r['freshness_text']}" if r["freshness_text"] else ""
        lines.append(
            f"[{r['type']}/{r['scope']}] {r['name']}\n"
            f"  {r['description']}\n"
            f"  {preview}"
            f"{freshness}"
        )
    return "\n\n".join(lines)
def _memory_list(params: dict, config: dict) -> str:
    """List memory entries (type, scope, age, description) as a manifest."""
    headers = scan_all_memories()
    if not headers:
        return "No memories stored."
    wanted_scope = params.get("scope", "all")
    if wanted_scope != "all":
        headers = [h for h in headers if h.scope == wanted_scope]
        if not headers:
            return f"No {wanted_scope} memories stored."
    manifest = format_memory_manifest(headers)
    return f"{len(headers)} memory/memories:\n\n{manifest}"
# ── Tool registrations ─────────────────────────────────────────────────────
# ── Tool registrations ─────────────────────────────────────────────────────
# These register_tool() calls run at import time: importing memory.tools is
# what makes the four memory tools available in the central registry.

# MemorySave mutates disk state (writes a file and rebuilds the index), so
# it is neither read-only nor safe to run concurrently.
register_tool(ToolDef(
    name="MemorySave",
    schema={
        "name": "MemorySave",
        "description": (
            "Save a persistent memory entry as a markdown file with frontmatter. "
            "Use for information that should persist across conversations: "
            "user preferences, feedback/corrections, project context, or external references. "
            "Do NOT save: code patterns, architecture, git history, or task state.\n\n"
            "For feedback/project memories, structure content as: "
            "rule/fact, then **Why:** and **How to apply:** lines."
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "Human-readable name (becomes the filename slug)",
                },
                "type": {
                    "type": "string",
                    "enum": ["user", "feedback", "project", "reference"],
                    "description": (
                        "user=preferences/role, feedback=guidance on how to work, "
                        "project=ongoing work/decisions, reference=external system pointers"
                    ),
                },
                "description": {
                    "type": "string",
                    "description": "Short one-line description (used for relevance decisions — be specific)",
                },
                "content": {
                    "type": "string",
                    "description": "Body text. For feedback/project: rule/fact + **Why:** + **How to apply:**",
                },
                "scope": {
                    "type": "string",
                    "enum": ["user", "project"],
                    "description": (
                        "'user' (default) = ~/.nano_claude/memory/ shared across projects; "
                        "'project' = .nano_claude/memory/ local to this project"
                    ),
                },
            },
            "required": ["name", "type", "description", "content"],
        },
    },
    func=_memory_save,
    read_only=False,
    concurrent_safe=False,
))

# MemoryDelete also mutates disk state (unlinks a file, rebuilds the index).
register_tool(ToolDef(
    name="MemoryDelete",
    schema={
        "name": "MemoryDelete",
        "description": "Delete a persistent memory entry by name.",
        "input_schema": {
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "Name of the memory to delete"},
                "scope": {
                    "type": "string",
                    "enum": ["user", "project"],
                    "description": "Scope to delete from (default: 'user')",
                },
            },
            "required": ["name"],
        },
    },
    func=_memory_delete,
    read_only=False,
    concurrent_safe=False,
))

# MemorySearch only reads memory files (the optional AI ranking makes an
# outbound call but writes nothing), so it is read-only and concurrent-safe.
# NOTE(review): the "scope" property declared below is not currently read by
# _memory_search — confirm whether it should be wired through.
register_tool(ToolDef(
    name="MemorySearch",
    schema={
        "name": "MemorySearch",
        "description": (
            "Search persistent memories by keyword. Returns matching entries with "
            "content preview and staleness warning for old memories. "
            "Set use_ai=true to use AI-powered relevance ranking (costs a small API call)."
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"},
                "max_results": {
                    "type": "integer",
                    "description": "Maximum results to return (default: 5)",
                },
                "use_ai": {
                    "type": "boolean",
                    "description": "Use AI relevance ranking (default: false = keyword only)",
                },
                "scope": {
                    "type": "string",
                    "enum": ["user", "project", "all"],
                    "description": "Which scope to search (default: 'all')",
                },
            },
            "required": ["query"],
        },
    },
    func=_memory_search,
    read_only=True,
    concurrent_safe=True,
))

# MemoryList is a pure read over the scan manifest; all parameters optional,
# hence no "required" key in the schema.
register_tool(ToolDef(
    name="MemoryList",
    schema={
        "name": "MemoryList",
        "description": (
            "List all memory entries with type, scope, age, and description. "
            "Useful for reviewing what's been remembered before deciding to save or delete."
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "scope": {
                    "type": "string",
                    "enum": ["user", "project", "all"],
                    "description": "Which scope to list (default: 'all')",
                },
            },
        },
    },
    func=_memory_list,
    read_only=True,
    concurrent_safe=True,
))

View File

@@ -0,0 +1,86 @@
"""Memory type taxonomy and system-prompt guidance text.
Four types capture context NOT derivable from the current project state.
Code patterns, architecture, git history, and file structure are derivable
(via grep/git/CLAUDE.md) and should NOT be saved as memories.
"""
# The closed set of memory types. Only context NOT derivable from current
# project state belongs in memory (see module docstring).
MEMORY_TYPES = ["user", "feedback", "project", "reference"]

# Condensed per-type guidance (used in system prompt injection)
MEMORY_TYPE_DESCRIPTIONS: dict[str, str] = {
    "user": (
        "Information about the user's role, goals, responsibilities, and knowledge. "
        "Helps tailor future behavior to the user's preferences."
    ),
    "feedback": (
        "Guidance the user has given about how to approach work — both what to avoid "
        "and what to keep doing. Lead with the rule, then **Why:** and **How to apply:**."
    ),
    "project": (
        "Ongoing work, goals, bugs, or incidents not derivable from code or git history. "
        "Lead with the fact/decision, then **Why:** and **How to apply:**. "
        "Always convert relative dates to absolute dates."
    ),
    "reference": (
        "Pointers to external systems (issue trackers, dashboards, Slack channels, docs)."
    ),
}

# What NOT to save (mirrors Claude Code source)
WHAT_NOT_TO_SAVE = """\
## What NOT to save in memory
- Code patterns, conventions, architecture, file paths, or project structure — derivable from the codebase.
- Git history, recent changes, who-changed-what — use `git log` / `git blame`.
- Debugging solutions or fix recipes — the fix is in the code; the commit message has context.
- Anything already documented in CLAUDE.md files.
- Ephemeral task details: in-progress work, temporary state, current conversation context.
These exclusions apply even when explicitly asked. If asked to save a PR list or activity summary,
ask what was *surprising* or *non-obvious* — that is the part worth keeping."""

# Memory format example (frontmatter). The {{...}} tokens are placeholder
# markers shown verbatim to the model — this string is substituted into
# MEMORY_SYSTEM_PROMPT as a value, so str.format() does not reprocess it.
MEMORY_FORMAT_EXAMPLE = """\
```markdown
---
name: {{memory name}}
description: {{one-line description — used to decide relevance, so be specific}}
type: {{user | feedback | project | reference}}
---
{{memory content — for feedback/project types: rule/fact, then **Why:** and **How to apply:** lines}}
```"""

# Full guidance injected into the system prompt. {format_example} is the only
# str.format() placeholder in the template below.
MEMORY_SYSTEM_PROMPT = """\
## Memory system
You have a persistent, file-based memory system. Memories are stored as markdown files with
YAML frontmatter. Build this up over time so future conversations have context about the user,
their preferences, and the work you're doing together.
**Types** (save only what cannot be derived from the codebase):
- **user** — role, goals, knowledge, preferences
- **feedback** — guidance on how to work (corrections AND confirmations of non-obvious approaches)
- **project** — ongoing work, decisions, deadlines not in git history
- **reference** — pointers to external systems (Linear, Grafana, Slack, etc.)
**When to save**: If the user corrects you, confirms an approach, or shares context that should
persist beyond this conversation. For feedback: save corrections AND quiet confirmations.
**Body structure for feedback/project**: Lead with the rule/fact, then:
**Why:** (reason given) | **How to apply:** (when this guidance kicks in)
**Format**:
{format_example}
**Saving is two steps**:
1. Write the memory to its own file (e.g. `feedback_testing.md`) using MemorySave.
2. The index (MEMORY.md) is updated automatically.
**What NOT to save**: code patterns, architecture, git history, debugging fixes,
anything already in CLAUDE.md, or ephemeral task state.
**Before recommending from memory**: A memory naming a file, function, or flag may be stale.
Verify it still exists before acting on it. For current state, prefer `git log` or reading code.
""".format(format_example=MEMORY_FORMAT_EXAMPLE)