From c64d71f4384a4e01e55ed62c11a268d6ad060770 Mon Sep 17 00:00:00 2001
From: m3tm3re
Date: Sat, 14 Feb 2026 08:22:59 +0100
Subject: [PATCH] docs(memory): update skills for opencode-memory plugin,
deprecate mem0
---
.sisyphus/boulder.json | 14 +-
.sisyphus/notepads/memory-system/decisions.md | 28 +
.sisyphus/notepads/memory-system/issues.md | 0
.sisyphus/notepads/memory-system/learnings.md | 47 +
.sisyphus/notepads/memory-system/problems.md | 0
.../notepads/opencode-memory/learnings.md | 120 ++
.sisyphus/plans/memory-system.md | 897 +++++++++
.sisyphus/plans/opencode-memory.md | 1634 +++++++++++++++++
context/profile.md | 30 +-
skills/mem0-memory/SKILL.md | 10 +-
skills/memory/SKILL.md | 214 +--
skills/memory/references/deployment.md | 54 +
12 files changed, 2857 insertions(+), 191 deletions(-)
create mode 100644 .sisyphus/notepads/memory-system/decisions.md
create mode 100644 .sisyphus/notepads/memory-system/issues.md
create mode 100644 .sisyphus/notepads/memory-system/learnings.md
create mode 100644 .sisyphus/notepads/memory-system/problems.md
create mode 100644 .sisyphus/notepads/opencode-memory/learnings.md
create mode 100644 .sisyphus/plans/memory-system.md
create mode 100644 .sisyphus/plans/opencode-memory.md
create mode 100644 skills/memory/references/deployment.md
diff --git a/.sisyphus/boulder.json b/.sisyphus/boulder.json
index ef97e9f..8b0ec8e 100644
--- a/.sisyphus/boulder.json
+++ b/.sisyphus/boulder.json
@@ -1,7 +1,9 @@
{
- "active_plan": "/home/m3tam3re/p/AI/AGENTS/.sisyphus/plans/chiron-agent-framework.md",
- "started_at": "2026-02-03T19:07:36.011Z",
- "session_ids": ["ses_3db18d3abffeIjqxbVVqNCz5As", "ses_3db16c6daffeKLCdiQiDREMZ3C"],
- "plan_name": "chiron-agent-framework",
- "completed_at": "2026-02-03T20:09:00.000Z"
-}
+ "active_plan": "/home/m3tam3re/p/AI/AGENTS/.sisyphus/plans/opencode-memory.md",
+ "started_at": "2026-02-14T04:43:37.746Z",
+ "session_ids": [
+ "ses_3a5a47a05ffeoNYfz2RARYsHX9"
+ ],
+ "plan_name": "opencode-memory",
+ "agent": "atlas"
+}
\ No newline at end of file
diff --git a/.sisyphus/notepads/memory-system/decisions.md b/.sisyphus/notepads/memory-system/decisions.md
new file mode 100644
index 0000000..5a15ac0
--- /dev/null
+++ b/.sisyphus/notepads/memory-system/decisions.md
@@ -0,0 +1,28 @@
+
+## Task 5: Update Mem0 Memory Skill (2026-02-12)
+
+### Decisions Made
+
+1. **Section Placement**: Added new sections without disrupting existing content structure
+ - "Memory Categories" after "Identity Scopes" (line ~109)
+ - "Dual-Layer Sync" after "Workflow Patterns" (line ~138)
+ - Extended "Health Check" section with Pre-Operation Check
+ - "Error Handling" at end, before API Reference
+
+2. **Content Structure**:
+ - Memory Categories: 5-category classification with table format
+ - Dual-Layer Sync: Complete sync pattern with bash example
+ - Health Check: Added pre-operation verification
+ - Error Handling: Comprehensive graceful degradation patterns
+
+3. **Validation Approach**:
+ - Used `./scripts/test-skill.sh --validate` for skill structure validation
+ - All sections verified with grep commands
+ - Commit and push completed successfully
+
+### Success Patterns
+
+- Edit tool works well for adding sections to existing markdown files
+- Preserving existing content while adding new sections
+- Using grep for verification of section additions
+- `./scripts/test-skill.sh --validate` validates YAML frontmatter automatically
diff --git a/.sisyphus/notepads/memory-system/issues.md b/.sisyphus/notepads/memory-system/issues.md
new file mode 100644
index 0000000..e69de29
diff --git a/.sisyphus/notepads/memory-system/learnings.md b/.sisyphus/notepads/memory-system/learnings.md
new file mode 100644
index 0000000..d19deec
--- /dev/null
+++ b/.sisyphus/notepads/memory-system/learnings.md
@@ -0,0 +1,47 @@
+
+## Core Memory Skill Creation (2026-02-12)
+
+**Task**: Create `skills/memory/SKILL.md` - dual-layer memory orchestration skill
+
+**Pattern Identified**:
+- Skill structure follows YAML frontmatter with required fields:
+ - `name`: skill identifier
+ - `description`: Use when (X), triggers (Y) pattern
+ - `compatibility`: "opencode"
+- Markdown structure: Overview, Prerequisites, Workflows, Error Handling, Integration, Quick Reference, See Also
+
+**Verification Pattern**:
+```bash
+test -f && echo "File exists"
+grep "name: "
+grep "key-term"
+```
+
+**Key Design Decision**:
+- Central orchestration skill that references underlying implementation skills (mem0-memory, obsidian)
+- 4 core workflows: Store, Recall, Auto-Capture, Auto-Recall
+- Error handling with graceful degradation
+
+## Apollo Agent Prompt Update (2026-02-12)
+
+**Task**: Add memory management responsibilities to Apollo agent system prompt
+
+**Edit Pattern**: Multiple targeted edits to single file preserving existing content
+- Line number-based edits require precise matching of surrounding context
+- Edit order: Core Responsibilities → Quality Standards → Tool Usage → Edge Cases
+- Each edit inserts new bullet items without removing existing content
+
+**Key Additions**:
+1. Core Responsibilities: "Manage dual-layer memory system (Mem0 + Obsidian CODEX)"
+2. Quality Standards: Memory storage, auto-capture, retrieval, categories
+3. Tool Usage: Mem0 REST API (localhost:8000), Obsidian MCP integration
+4. Edge Cases: Mem0 unavailable, Obsidian unavailable handling
+
+**Verification Pattern**:
+```bash
+grep -c "memory" ~/p/AI/AGENTS/prompts/apollo.txt # Count occurrences
+grep "Mem0" ~/p/AI/AGENTS/prompts/apollo.txt # Check specific term
+grep -i "auto-capture" ~/p/AI/AGENTS/prompts/apollo.txt # Case-insensitive
+```
+
+**Observation**: grep is case-sensitive by default - use -i for case-insensitive searches
diff --git a/.sisyphus/notepads/memory-system/problems.md b/.sisyphus/notepads/memory-system/problems.md
new file mode 100644
index 0000000..e69de29
diff --git a/.sisyphus/notepads/opencode-memory/learnings.md b/.sisyphus/notepads/opencode-memory/learnings.md
new file mode 100644
index 0000000..0091cf4
--- /dev/null
+++ b/.sisyphus/notepads/opencode-memory/learnings.md
@@ -0,0 +1,120 @@
+# Opencode Memory Plugin — Learnings
+
+## Session: ses_3a5a47a05ffeoNYfz2RARYsHX9
+Started: 2026-02-14
+
+### Architecture Decisions
+- SQLite + FTS5 + vec0 replaces mem0+qdrant entirely
+- Markdown at ~/CODEX/80-memory/ is source of truth
+- SQLite DB at ~/.local/share/opencode-memory/index.db is derived index
+- OpenAI text-embedding-3-small for embeddings (1536 dimensions)
+- Hybrid search: 0.7 vector weight + 0.3 BM25 weight
+- Chunking: 400 tokens, 80 overlap (tiktoken cl100k_base)
+
+### Key Patterns from Openclaw
+- MemoryIndexManager pattern (1590 lines) — file watching, chunking, indexing
+- Hybrid scoring with weighted combination
+- Embedding cache by content_hash + model
+- Two sources: "memory" (markdown files) + "sessions" (transcripts)
+- Two tools: memory_search (hybrid query) + memory_get (read lines)
+
+### Technical Stack
+- Runtime: bun
+- Test framework: bun test (TDD)
+- SQLite: better-sqlite3 (synchronous API)
+- Embeddings: openai npm package
+- Chunking: tiktoken (cl100k_base encoding)
+- File watching: chokidar
+- Validation: zod (for tool schemas)
+
+### Vec0 Extension Findings (Task 1)
+- **vec0 extension**: NOT AVAILABLE - requires vec0.so shared library not present
+- **Alternative solution**: sqlite-vec package (v0.1.7-alpha.2) successfully tested
+- **Loading mechanism**: `sqliteVec.load(db)` loads vector extension into database
+- **Test result**: Works with Node.js (better-sqlite3 native module compatible)
+- **Note**: better-sqlite3 does NOT work with Bun runtime (native module incompatibility)
+- **Testing command**: `node -e "const Database = require('better-sqlite3'); const sqliteVec = require('sqlite-vec'); const db = new Database(':memory:'); sqliteVec.load(db); console.log('OK')"`
+
+### Bun Runtime Limitations
+- better-sqlite3 native module NOT compatible with Bun (ERR_DLOPEN_FAILED)
+- Use Node.js for any code requiring better-sqlite3
+- Alternative: bun:sqlite API (similar API, but not same library)
+
+## Wave Progress
+- Wave 1: IN PROGRESS (Task 1)
+- Wave 2-6: PENDING
+
+### Configuration Module Implementation (Task: Config Module)
+- **TDD approach**: RED-GREEN-REFACTOR cycle successfully applied
+- **Pattern**: Default config object + resolveConfig() function for merging
+- **Path expansion**: `expandPath()` helper function handles `~` → `$HOME` expansion
+- **Test coverage**: 10 tests covering defaults, overrides, path expansion, and config merging
+- **TypeScript best practices**: Proper type exports from types.ts, type imports in config.ts
+- **Defaults match openclaw**: chunking (400/80), search weights (0.7/0.3), minScore (0.35), maxResults (6)
+- **Bun test framework**: Fast execution (~20ms for 10 tests), clean output
+
+### Database Schema Implementation (Task 2)
+- **TDD approach**: RED-GREEN-REFACTOR cycle successfully applied for db module
+- **Schema tables**: meta, files, chunks, embedding_cache, chunks_fts (FTS5), chunks_vec (vec0)
+- **WAL mode**: Enabled via `db.pragma('journal_mode = WAL')` for better concurrency
+- **Foreign keys**: Enabled via `db.pragma('foreign_keys = ON')`
+- **sqlite-vec integration**: Loaded via `sqliteVec.load(db)` for vector search capabilities
+- **FTS5 virtual table**: External content table referencing chunks for full-text search
+- **vec0 virtual table**: 1536-dimension float array for OpenAI text-embedding-3-small embeddings
+- **Test execution**: Use Node.js with tsx for TypeScript execution (not Bun runtime)
+- **Buffer handling**: Float32Array must be converted to Buffer via `Buffer.from(array.buffer)` for SQLite binding
+- **In-memory databases**: WAL mode returns 'memory' for :memory: DBs, 'wal' for file-based DBs
+- **Test coverage**: 9 tests covering table creation, data insertion, FTS5, vec0, WAL mode, and clean closure
+- **Error handling**: better-sqlite3 throws "The database connection is not open" for operations on closed DBs
+
+### Node.js Test Execution
+- **Issue**: better-sqlite3 not compatible with Bun runtime (native module)
+- **Solution**: Use Node.js with tsx (TypeScript executor) for running tests
+- **Command**: `npx tsx --test src/__tests__/db.test.ts`
+- **Node.test API**: Uses `describe`, `it`, `before`, `after` from 'node:test' module
+- **Assertions**: Use `assert` from 'node:assert' module
+- **Cleanup**: Use `after()` hooks for database cleanup, not `afterEach()` (node:test difference)
+
+### Embedding Provider Implementation (Task: Embeddings Module)
+- **TDD approach**: RED-GREEN-REFACTOR cycle successfully applied for embeddings module
+- **Mock database**: Created in-memory mock for testing since better-sqlite3 incompatible with Bun
+- **Float32 precision**: embeddings stored/retrieved via Float32Array has limited precision (use toBeCloseTo in tests)
+- **Cache implementation**: content_hash + model composite key in embedding_cache table
+- **Retry logic**: Exponential backoff (1s, 2s, 4s) for 429/500 errors, max 3 retries
+- **Test coverage**: 11 tests covering embed(), embedBatch(), cache hits/misses, API failures, retries, buffer conversion
+- **Helper functions**: embeddingToBuffer() and bufferToEmbedding() for Float32Array ↔ Buffer conversion
+- **Bun spyOn**: Use mockClear() to reset call count without replacing mock implementation
+- **Buffer size**: Float32 embedding stored as Buffer with size = dimensions * 4 bytes
+
+### FTS5 BM25 Search Implementation (Task: FTS5 Search Module)
+- **TDD approach**: RED-GREEN-REFACTOR cycle successfully applied for search module
+- **buildFtsQuery()**: Extracts alphanumeric tokens via regex `/[A-Za-z0-9_]+/g`, quotes them, joins with AND
+- **FTS5 escaping**: Tokens are quoted to handle special characters (e.g., `"term"`)
+- **BM25 score normalization**: `bm25RankToScore(rank)` converts BM25 rank to 0-1 score using `1 / (1 + normalized)`
+- **FTS5 external content tables**: The schema uses `content='chunks', content_rowid='rowid'` but requires manual insertion into chunks_fts
+- **Test data setup**: Must manually insert into chunks_fts after inserting into chunks (external content doesn't auto-populate)
+- **BM25 ranking**: Results are ordered by `rank` column (lower rank = better match for FTS5)
+- **Error handling**: searchFTS catches SQL errors and returns empty array (graceful degradation)
+- **MaxResults parameter**: Respects LIMIT clause in SQL query
+- **SearchResult interface**: Includes id, filePath, startLine, endLine, text, contentHash, source, score (all required)
+- **Prefix matching**: FTS5 requires the `*` operator for prefix queries (e.g., `test*` matches "testing"); a plain token like "test" only matches whole terms
+- **No matches**: Returns empty array when query has no valid tokens or no matches found
+- **Test coverage**: 7 tests covering basic search, exact keywords, partial words, no matches, ranking, maxResults, and metadata
+
+### Hybrid Search Implementation (Task: Hybrid Search Combiner)
+- **TDD approach**: RED-GREEN-REFACTOR cycle successfully applied for hybrid search
+- **Weighted scoring**: Combined score = vectorWeight * vectorScore + textWeight * textScore (default: 0.7/0.3)
+- **Result merging**: Uses Map to merge results by chunk ID, preventing duplicates
+- **Dual-score tracking**: Each result tracks both vectorScore and textScore separately, allowing for degraded modes
+- **Graceful degradation**: Works with FTS5-only (vector search fails) or vector-only (FTS5 fails)
+- **minScore filtering**: Results below minScore threshold are filtered out after score calculation
+- **Score sorting**: Results sorted by combined score in descending order before applying maxResults limit
+- **Vector search fallback**: searchVector catches errors and returns empty array, allowing FTS5-only operation
+- **FTS5 query fallback**: searchFTS catches SQL errors and returns empty array, allowing vector-only operation
+- **Database cleanup**: beforeEach must delete from chunks_fts, chunks_vec, chunks, and files to avoid state bleed
+- **Virtual table corruption**: Deleting from FTS5/vec0 virtual tables can cause corruption - use try/catch to recreate
+- **SearchResult type conflict**: SearchResult is imported from types.ts, don't re-export in search.ts
+- **Test isolation**: Virtual tables (chunks_fts, chunks_vec) must be cleared and potentially recreated between tests
+- **Buffer conversion**: queryEmbedding converted to Buffer via Buffer.from(new Float32Array(array).buffer)
+- **Debug logging**: process.env.DEBUG_SEARCH flag enables detailed logging of FTS5 and vector search results
+- **Test coverage**: 9 tests covering combination, weighting, minScore filtering, deduplication, sorting, maxResults, degraded modes (FTS5-only, vector-only), and custom weights
diff --git a/.sisyphus/plans/memory-system.md b/.sisyphus/plans/memory-system.md
new file mode 100644
index 0000000..3cf52a9
--- /dev/null
+++ b/.sisyphus/plans/memory-system.md
@@ -0,0 +1,897 @@
+# Memory System for AGENTS + Obsidian CODEX
+
+## TL;DR
+
+> **Quick Summary**: Build a dual-layer memory system equivalent to openclaw's — Mem0 for fast semantic search/auto-recall + Obsidian CODEX vault for human-readable, versioned knowledge. Memories are stored in both layers and cross-referenced via IDs.
+>
+> **Deliverables**:
+> - New `skills/memory/SKILL.md` — Core orchestration skill (auto-capture, auto-recall, dual-layer sync)
+> - New `80-memory/` folder in CODEX vault with category subfolders + memory template
+> - Obsidian MCP server configuration (cyanheads/obsidian-mcp-server)
+> - Updated skills (mem0-memory, obsidian), Apollo prompt, CODEX docs, user profile
+>
+> **Estimated Effort**: Medium (9 tasks across config/docs, no traditional code)
+> **Parallel Execution**: YES — 4 waves
+> **Critical Path**: Task 1 (vault infra) → Task 4 (memory skill) → Task 9 (validation)
+
+---
+
+## Context
+
+### Original Request
+Adapt openclaw's memory system for the opencode AGENTS repo, integrated with the Obsidian CODEX vault at `~/CODEX`. The vault should serve as a "second brain" for both the user AND AI agents.
+
+### Interview Summary
+**Key Discussions**:
+- Analyzed openclaw's 3-layer memory architecture (SQLite+vectors builtin, memory-core plugin, memory-lancedb plugin with auto-capture/auto-recall)
+- User confirmed Mem0 is available self-hosted at localhost:8000 — just needs spinning up
+- User chose `80-memory/` as dedicated vault folder with category subfolders
+- User chose auto+explicit capture (LLM extraction at session end + "remember this" commands)
+- User chose agent QA only (no unit test infrastructure — repo is config/docs only)
+- No Obsidian MCP server currently configured — plan to add cyanheads/obsidian-mcp-server
+
+**Research Findings**:
+- cyanheads/obsidian-mcp-server (363 stars) — Best MCP server: frontmatter management, vault cache, search with pagination, tag management
+- GitHub Copilot's memory system: citation-based verification pattern (Phase 2 candidate)
+- Production recommendation: dual-layer (operational memory + documented knowledge)
+- Mem0 provides semantic search, user_id/agent_id/run_id scoping, metadata support, `/health` endpoint
+- Auto-capture best practice: max 3 per session, LLM extraction > regex patterns
+
+### Metis Review
+**Identified Gaps** (addressed):
+- 80-memory/ subfolders vs flat pattern: Resolved — follows `30-resources/` pattern (subfolders by TYPE), not `50-zettelkasten/` flat pattern
+- Mem0 health check: Added prerequisite validation step
+- Error handling undefined: Defined — Mem0 unavailable → skip, Obsidian unavailable → Mem0 only
+- Deployment order: Defined — CODEX first → MCP config → skills → validation
+- Scope creep risk: Locked down — citation verification, memory deletion/lifecycle, dashboards all Phase 2
+- Agent role clarity: Defined — memory skill loadable by any agent, Apollo is primary memory specialist
+
+---
+
+## Work Objectives
+
+### Core Objective
+Build a dual-layer memory system for opencode agents that stores memories in Mem0 (semantic search, operational) AND the Obsidian CODEX vault (human-readable, versioned, wiki-linked). Equivalent in capability to openclaw's memory system.
+
+### Concrete Deliverables
+**AGENTS repo** (`~/p/AI/AGENTS`):
+- `skills/memory/SKILL.md` — NEW: Core memory skill
+- `skills/memory/references/mcp-config.md` — NEW: Obsidian MCP server config documentation
+- `skills/mem0-memory/SKILL.md` — UPDATED: Add categories, dual-layer sync
+- `skills/obsidian/SKILL.md` — UPDATED: Add 80-memory/ conventions
+- `prompts/apollo.txt` — UPDATED: Add memory management responsibilities
+- `context/profile.md` — UPDATED: Add memory system configuration
+
+**CODEX vault** (`~/CODEX`):
+- `80-memory/` — NEW: Folder with subfolders (preferences/, facts/, decisions/, entities/, other/)
+- `templates/memory.md` — NEW: Memory note template
+- `tag-taxonomy.md` — UPDATED: Add #memory/* tags
+- `AGENTS.md` — UPDATED: Add 80-memory/ docs, folder decision tree, memory workflows
+- `README.md` — UPDATED: Add 80-memory/ to folder structure
+
+**Infrastructure** (Nix home-manager — outside AGENTS repo):
+- Add cyanheads/obsidian-mcp-server to opencode.json MCP section
+
+### Definition of Done
+- [x] All 11 files created/updated as specified
+- [x] `curl http://localhost:8000/health` returns 200 (Mem0 running)
+- [~] `curl http://127.0.0.1:27124/vault-info` returns vault info (Obsidian REST API) — *Requires Obsidian desktop app to be open*
+- [x] `./scripts/test-skill.sh --validate` passes for new/updated skills
+- [x] 80-memory/ folder exists in CODEX vault with 5 subfolders
+- [x] Memory template creates valid notes with correct frontmatter
+
+### Must Have
+- Dual-layer storage: every memory in Mem0 AND Obsidian
+- Auto-capture at session end (LLM-based, max 3 per session)
+- Explicit "remember this" command support
+- Auto-recall: inject relevant memories before agent starts
+- 5 categories: preference, fact, decision, entity, other
+- Health checks before memory operations
+- Cross-reference: mem0_id in Obsidian frontmatter, obsidian_ref in Mem0 metadata
+- Error handling: graceful degradation when either layer unavailable
+
+### Must NOT Have (Guardrails)
+- NO citation-based memory verification (Phase 2)
+- NO memory expiration/lifecycle management (Phase 2)
+- NO memory deletion/forget functionality (Phase 2)
+- NO memory search UI or Obsidian dashboards (Phase 2)
+- NO conflict resolution UI between layers (manual edit only)
+- NO unit tests (repo has no test infrastructure — agent QA only)
+- NO subfolders in 50-zettelkasten/ or 70-tasks/ (respect flat structure)
+- NO new memory categories beyond the 5 defined
+- NO modifications to existing Obsidian templates (only ADD memory.md)
+- NO changes to agents.json (no new agents or agent config changes)
+
+---
+
+## Verification Strategy
+
+> **UNIVERSAL RULE: ZERO HUMAN INTERVENTION**
+>
+> ALL tasks MUST be verifiable WITHOUT any human action.
+> Every criterion is verifiable by running a command or checking file existence.
+
+### Test Decision
+- **Infrastructure exists**: NO (config-only repo)
+- **Automated tests**: None (agent QA only)
+- **Framework**: N/A
+
+### Agent-Executed QA Scenarios (MANDATORY — ALL tasks)
+
+Verification tools by deliverable type:
+
+| Type | Tool | How Agent Verifies |
+|------|------|-------------------|
+| Vault folders/files | Bash (ls, test -f) | Check existence, content |
+| Skill YAML frontmatter | Bash (grep, python) | Parse and validate fields |
+| Mem0 API | Bash (curl) | Send requests, parse JSON |
+| Obsidian REST API | Bash (curl) | Read notes, check frontmatter |
+| MCP server | Bash (npx) | Test server startup |
+
+---
+
+## Execution Strategy
+
+### Parallel Execution Waves
+
+```
+Wave 1 (Start Immediately — no dependencies):
+├── Task 1: CODEX vault memory infrastructure (folders, template, tags)
+└── Task 3: Obsidian MCP server config documentation
+
+Wave 2 (After Wave 1 — depends on vault structure existing):
+├── Task 2: CODEX vault documentation updates (AGENTS.md, README.md)
+├── Task 4: Create core memory skill (skills/memory/SKILL.md)
+├── Task 5: Update Mem0 memory skill
+└── Task 6: Update Obsidian skill
+
+Wave 3 (After Wave 2 — depends on skill content for prompt/profile):
+├── Task 7: Update Apollo agent prompt
+└── Task 8: Update user context profile
+
+Wave 4 (After all — final validation):
+└── Task 9: End-to-end validation
+
+Critical Path: Task 1 → Task 4 → Task 9
+Parallel Speedup: ~50% faster than sequential
+```
+
+### Dependency Matrix
+
+| Task | Depends On | Blocks | Can Parallelize With |
+|------|------------|--------|---------------------|
+| 1 | None | 2, 4, 5, 6 | 3 |
+| 2 | 1 | 9 | 4, 5, 6 |
+| 3 | None | 4 | 1 |
+| 4 | 1, 3 | 7, 8, 9 | 5, 6 |
+| 5 | 1 | 9 | 4, 6 |
+| 6 | 1 | 9 | 4, 5 |
+| 7 | 4 | 9 | 8 |
+| 8 | 4 | 9 | 7 |
+| 9 | ALL | None | None (final) |
+
+### Agent Dispatch Summary
+
+| Wave | Tasks | Recommended Agents |
+|------|-------|-------------------|
+| 1 | 1, 3 | task(category="quick", load_skills=["obsidian"], run_in_background=false) |
+| 2 | 2, 4, 5, 6 | dispatch parallel: task(category="unspecified-high") for Task 4; task(category="quick") for 2, 5, 6 |
+| 3 | 7, 8 | task(category="quick", run_in_background=false) |
+| 4 | 9 | task(category="unspecified-low", run_in_background=false) |
+
+---
+
+## TODOs
+
+- [x] 1. CODEX Vault Memory Infrastructure
+
+ **What to do**:
+ - Create `80-memory/` folder with 5 subfolders: `preferences/`, `facts/`, `decisions/`, `entities/`, `other/`
+ - Create each subfolder with a `.gitkeep` file so git tracks empty directories
+ - Create `templates/memory.md` — memory note template with frontmatter:
+ ```yaml
+ ---
+ type: memory
+ category: # preference | fact | decision | entity | other
+ mem0_id: # Mem0 memory ID (e.g., "mem_abc123")
+ source: explicit # explicit | auto-capture
+ importance: # critical | high | medium | low
+ created: <% tp.date.now("YYYY-MM-DD") %>
+ updated: <% tp.date.now("YYYY-MM-DD") %>
+ tags:
+ - memory
+ sync_targets: []
+ ---
+
+ # Memory Title
+
+ ## Content
+
+
+ ## Context
+
+
+ ## Related
+
+ ```
+ - Update `tag-taxonomy.md` — add `#memory` tag category with subtags:
+ ```
+ #memory
+ ├── #memory/preference
+ ├── #memory/fact
+ ├── #memory/decision
+ ├── #memory/entity
+ └── #memory/other
+ ```
+ Include usage examples and definitions for each category
+
+ **Must NOT do**:
+ - Do NOT create subfolders inside 50-zettelkasten/ or 70-tasks/
+ - Do NOT modify existing templates (only ADD memory.md)
+ - Do NOT use Templater syntax that doesn't match existing templates
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - Reason: Simple file creation, no complex logic
+ - **Skills**: [`obsidian`]
+ - `obsidian`: Vault conventions, frontmatter patterns, template structure
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 1 (with Task 3)
+ - **Blocks**: Tasks 2, 4, 5, 6
+ - **Blocked By**: None
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/CODEX/30-resources/` — Subfolder-by-type pattern to follow (bookmarks/, literature/, meetings/, people/, recipes/)
+ - `/home/m3tam3re/CODEX/templates/task.md` — Template frontmatter pattern (type, status, created, updated, tags, sync_targets)
+ - `/home/m3tam3re/CODEX/templates/bookmark.md` — Simpler template example
+
+ **Documentation References**:
+ - `/home/m3tam3re/CODEX/AGENTS.md:22-27` — Frontmatter conventions (required fields: type, created, updated)
+ - `/home/m3tam3re/CODEX/AGENTS.md:163-176` — Template locations table (add memory row)
+ - `/home/m3tam3re/CODEX/tag-taxonomy.md:1-18` — Tag structure rules (max 3 levels, kebab-case)
+
+ **WHY Each Reference Matters**:
+ - `30-resources/` shows that subfolders-by-type is the established vault pattern for categorized content
+ - `task.md` template shows the exact frontmatter field set expected by the vault
+ - `tag-taxonomy.md` rules show the 3-level max hierarchy constraint for new tags
+
+ **Acceptance Criteria**:
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Verify 80-memory folder structure
+ Tool: Bash
+ Steps:
+ 1. test -d /home/m3tam3re/CODEX/80-memory/preferences
+ 2. test -d /home/m3tam3re/CODEX/80-memory/facts
+ 3. test -d /home/m3tam3re/CODEX/80-memory/decisions
+ 4. test -d /home/m3tam3re/CODEX/80-memory/entities
+ 5. test -d /home/m3tam3re/CODEX/80-memory/other
+ Expected Result: All 5 directories exist (exit code 0 for each)
+ Evidence: Shell output captured
+
+ Scenario: Verify memory template exists with correct frontmatter
+ Tool: Bash
+ Steps:
+ 1. test -f /home/m3tam3re/CODEX/templates/memory.md
+ 2. grep "type: memory" /home/m3tam3re/CODEX/templates/memory.md
+ 3. grep "category:" /home/m3tam3re/CODEX/templates/memory.md
+ 4. grep "mem0_id:" /home/m3tam3re/CODEX/templates/memory.md
+ Expected Result: File exists and contains required frontmatter fields
+ Evidence: grep output captured
+
+ Scenario: Verify tag-taxonomy updated with memory tags
+ Tool: Bash
+ Steps:
+ 1. grep "#memory" /home/m3tam3re/CODEX/tag-taxonomy.md
+ 2. grep "#memory/preference" /home/m3tam3re/CODEX/tag-taxonomy.md
+ 3. grep "#memory/fact" /home/m3tam3re/CODEX/tag-taxonomy.md
+ Expected Result: All memory tags present in taxonomy
+ Evidence: grep output captured
+ ```
+
+ **Commit**: YES
+ - Message: `feat(vault): add 80-memory folder structure and memory template`
+ - Files: `80-memory/`, `templates/memory.md`, `tag-taxonomy.md`
+ - Repo: `~/CODEX`
+
+---
+
+- [x] 2. CODEX Vault Documentation Updates
+
+ **What to do**:
+ - Update `AGENTS.md`:
+ - Add `80-memory/` row to Folder Structure table (line ~11)
+ - Add `#### 80-memory` section in Folder Details (after 70-tasks section, ~line 161)
+ - Update Folder Decision Tree to include memory branch: `Is it a memory/learned fact? → YES → 80-memory/`
+ - Add Memory template row to Template Locations table (line ~165)
+ - Add Memory Workflows section (after Sync Workflow): create memory, retrieve memory, dual-layer sync
+ - Update `README.md`:
+ - Add `80-memory/` to folder structure diagram with subfolders
+ - Add `80-memory/` row to Folder Details section
+ - Add memory template to Templates table
+
+ **Must NOT do**:
+ - Do NOT rewrite existing sections — only ADD new content
+ - Do NOT remove any existing folder/template documentation
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - Reason: Documentation additions to existing files, following established patterns
+ - **Skills**: [`obsidian`]
+ - `obsidian`: Vault documentation conventions
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 2 (with Tasks 4, 5, 6)
+ - **Blocks**: Task 9
+ - **Blocked By**: Task 1 (needs folder structure to reference)
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/CODEX/AGENTS.md:110-161` — Existing Folder Details sections to follow pattern
+ - `/home/m3tam3re/CODEX/AGENTS.md:75-108` — Folder Decision Tree format
+ - `/home/m3tam3re/CODEX/README.md` — Folder structure diagram format
+
+ **WHY Each Reference Matters**:
+ - AGENTS.md folder details show the exact format: Purpose, Structure (flat/subfolders), Key trait, When to use, Naming convention
+ - Decision tree shows the exact `├─ YES →` format to follow
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Verify AGENTS.md has 80-memory documentation
+ Tool: Bash
+ Steps:
+ 1. grep "80-memory" /home/m3tam3re/CODEX/AGENTS.md
+ 2. grep "Is it a memory" /home/m3tam3re/CODEX/AGENTS.md
+ 3. grep "templates/memory.md" /home/m3tam3re/CODEX/AGENTS.md
+ Expected Result: All three patterns found
+ Evidence: grep output
+
+ Scenario: Verify README.md has 80-memory in structure
+ Tool: Bash
+ Steps:
+ 1. grep "80-memory" /home/m3tam3re/CODEX/README.md
+ 2. grep "preferences/" /home/m3tam3re/CODEX/README.md
+ Expected Result: Folder and subfolder documented
+ Evidence: grep output
+ ```
+
+ **Commit**: YES
+ - Message: `docs(vault): add 80-memory documentation to AGENTS.md and README.md`
+ - Files: `AGENTS.md`, `README.md`
+ - Repo: `~/CODEX`
+
+---
+
+- [x] 3. Obsidian MCP Server Configuration Documentation
+
+ **What to do**:
+ - Create `skills/memory/references/mcp-config.md` documenting:
+ - cyanheads/obsidian-mcp-server configuration for opencode.json
+ - Required environment variables: `OBSIDIAN_API_KEY`, `OBSIDIAN_BASE_URL`, `OBSIDIAN_VERIFY_SSL`, `OBSIDIAN_ENABLE_CACHE`
+ - opencode.json MCP section snippet:
+ ```json
+ "Obsidian-Vault": {
+ "command": ["npx", "obsidian-mcp-server"],
+ "environment": {
+ "OBSIDIAN_API_KEY": "",
+ "OBSIDIAN_BASE_URL": "http://127.0.0.1:27123",
+ "OBSIDIAN_VERIFY_SSL": "false",
+ "OBSIDIAN_ENABLE_CACHE": "true"
+ },
+ "enabled": true,
+ "type": "local"
+ }
+ ```
+ - Nix home-manager snippet showing how to add to `programs.opencode.settings.mcp`
+ - Note that this requires `home-manager switch` after adding
+ - Available MCP tools list: obsidian_read_note, obsidian_update_note, obsidian_global_search, obsidian_manage_frontmatter, obsidian_manage_tags, obsidian_list_notes, obsidian_delete_note, obsidian_search_replace
+ - How to get the API key from Obsidian: Settings → Local REST API plugin
+
+ **Must NOT do**:
+ - Do NOT directly modify `~/.config/opencode/opencode.json` (Nix-managed)
+ - Do NOT modify `agents/agents.json`
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - Reason: Creating a single reference doc
+ - **Skills**: [`obsidian`]
+ - `obsidian`: Obsidian REST API configuration knowledge
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 1 (with Task 1)
+ - **Blocks**: Task 4
+ - **Blocked By**: None
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md:156-166` — Existing API reference pattern
+ - `/home/m3tam3re/.config/opencode/opencode.json:77-127` — Current MCP config format (Exa, Basecamp, etc.)
+
+ **External References**:
+ - GitHub: `https://github.com/cyanheads/obsidian-mcp-server` — Config docs, env vars, tool list
+ - npm: `npx obsidian-mcp-server` — Installation method
+
+ **WHY Each Reference Matters**:
+ - opencode.json MCP section shows exact JSON format needed (command array, environment, enabled, type)
+ - cyanheads repo shows required env vars and their defaults
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Verify MCP config reference file exists
+ Tool: Bash
+ Steps:
+ 1. test -f /home/m3tam3re/p/AI/AGENTS/skills/memory/references/mcp-config.md
+ 2. grep "obsidian-mcp-server" /home/m3tam3re/p/AI/AGENTS/skills/memory/references/mcp-config.md
+ 3. grep "OBSIDIAN_API_KEY" /home/m3tam3re/p/AI/AGENTS/skills/memory/references/mcp-config.md
+ 4. grep "home-manager" /home/m3tam3re/p/AI/AGENTS/skills/memory/references/mcp-config.md
+ Expected Result: File exists with MCP config, env vars, and Nix instructions
+ Evidence: grep output
+ ```
+
+ **Commit**: YES (groups with Task 4)
+ - Message: `feat(memory): add core memory skill and MCP config reference`
+ - Files: `skills/memory/SKILL.md`, `skills/memory/references/mcp-config.md`
+ - Repo: `~/p/AI/AGENTS`
+
+---
+
+- [x] 4. Create Core Memory Skill
+
+ **What to do**:
+ - Create `skills/memory/SKILL.md` — the central orchestration skill for the dual-layer memory system
+ - YAML frontmatter:
+ ```yaml
+ ---
+ name: memory
+ description: "Dual-layer memory system (Mem0 + Obsidian CODEX). Use when: (1) storing information for future recall ('remember this'), (2) auto-capturing session insights, (3) recalling past decisions/preferences/facts, (4) injecting relevant context before tasks. Triggers: 'remember', 'recall', 'what do I know about', 'memory', session end."
+ compatibility: opencode
+ ---
+ ```
+ - Sections to include:
+ 1. **Overview** — Dual-layer architecture (Mem0 operational + Obsidian documented)
+ 2. **Prerequisites** — Mem0 running at localhost:8000, Obsidian MCP configured (reference mcp-config.md)
+ 3. **Memory Categories** — 5 categories with definitions and examples:
+ - preference: Personal preferences (UI, workflow, communication style)
+ - fact: Objective information about user/work (role, tech stack, constraints)
+ - decision: Architectural/tool choices made (with rationale)
+ - entity: People, organizations, systems, concepts
+ - other: Everything else
+ 4. **Workflow 1: Store Memory (Explicit)** — User says "remember X":
+ - Classify category
+ - POST to Mem0 `/memories` with user_id, metadata (category, source: "explicit")
+      - Create Obsidian note in `80-memory/<category>/` using memory template
+ - Cross-reference: mem0_id in Obsidian frontmatter, obsidian_ref in Mem0 metadata
+ 5. **Workflow 2: Recall Memory** — User asks "what do I know about X":
+ - POST to Mem0 `/search` with query
+ - Return results with Obsidian note paths for reference
+ 6. **Workflow 3: Auto-Capture (Session End)** — Automatic extraction:
+ - Scan conversation for memory-worthy content (preferences stated, decisions made, important facts)
+ - Select top 3 highest-value memories
+ - For each: store in Mem0 AND create Obsidian note (source: "auto-capture")
+ - Present to user: "I captured these memories: [list]. Confirm or reject?"
+ 7. **Workflow 4: Auto-Recall (Session Start)** — Context injection:
+ - On session start, search Mem0 with user's first message
+      - If relevant memories found (score > 0.7), inject as `<memory-context>` context
+ - Limit to top 5 most relevant
+ 8. **Error Handling** — Graceful degradation:
+ - Mem0 unavailable: `curl http://localhost:8000/health` fails → skip all memory ops, warn user
+ - Obsidian unavailable: Store in Mem0 only, log that Obsidian sync failed
+ - Both unavailable: Skip memory entirely, continue without memory features
+ 9. **Integration** — How other skills/agents use memory:
+ - Load `memory` skill to access memory workflows
+ - Apollo is primary memory specialist
+ - Any agent can search/store via Mem0 REST API patterns in `mem0-memory` skill
+
+ **Must NOT do**:
+ - Do NOT implement citation-based verification
+ - Do NOT implement memory deletion/forget
+ - Do NOT add memory expiration logic
+ - Do NOT create dashboards or search UI
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: Core deliverable requiring careful architecture documentation, must be comprehensive
+ - **Skills**: [`obsidian`, `mem0-memory`]
+ - `obsidian`: Vault conventions, template patterns, frontmatter standards
+ - `mem0-memory`: Mem0 REST API patterns, endpoint details
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 2 (with Tasks 2, 5, 6)
+ - **Blocks**: Tasks 7, 8, 9
+ - **Blocked By**: Tasks 1, 3
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md` — Full file: Mem0 REST API patterns, endpoint table, identity scopes, workflow patterns
+ - `/home/m3tam3re/p/AI/AGENTS/skills/obsidian/SKILL.md` — Full file: Obsidian REST API patterns, create/read/update note workflows, frontmatter conventions
+ - `/home/m3tam3re/p/AI/AGENTS/skills/reflection/SKILL.md` — Skill structure pattern (overview, workflows, integration)
+
+ **API References**:
+ - `/home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md:13-21` — Quick Reference endpoint table
+ - `/home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md:90-109` — Identity scopes (user_id, agent_id, run_id)
+
+ **Documentation References**:
+ - `/home/m3tam3re/CODEX/AGENTS.md:22-27` — Frontmatter conventions for vault notes
+ - `/home/m3tam3re/p/AI/AGENTS/skills/memory/references/mcp-config.md` — MCP server config (created in Task 3)
+
+ **External References**:
+ - OpenClaw reference: `/home/m3tam3re/p/AI/openclaw/extensions/memory-lancedb/index.ts` — Auto-capture regex patterns, auto-recall injection, importance scoring (use as inspiration, not copy)
+
+ **WHY Each Reference Matters**:
+ - mem0-memory SKILL.md provides the exact API endpoints and patterns to reference in dual-layer sync workflows
+ - obsidian SKILL.md provides the vault file creation patterns (curl commands, path encoding)
+ - openclaw memory-lancedb shows the auto-capture/auto-recall architecture to adapt
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Validate skill YAML frontmatter
+ Tool: Bash
+ Steps:
+ 1. test -f /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 2. grep "^name: memory$" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 3. grep "^compatibility: opencode$" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 4. grep "description:" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ Expected Result: Valid YAML frontmatter with name, description, compatibility
+ Evidence: grep output
+
+ Scenario: Verify skill contains all required workflows
+ Tool: Bash
+ Steps:
+ 1. grep -c "## Workflow" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 2. grep "Auto-Capture" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 3. grep "Auto-Recall" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 4. grep "Error Handling" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 5. grep "preference" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ Expected Result: At least 4 workflow sections, auto-capture, auto-recall, error handling, categories
+ Evidence: grep output
+
+ Scenario: Verify dual-layer sync pattern documented
+ Tool: Bash
+ Steps:
+ 1. grep "mem0_id" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 2. grep "obsidian_ref" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 3. grep "localhost:8000" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ 4. grep "80-memory" /home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md
+ Expected Result: Cross-reference IDs and both layer endpoints documented
+ Evidence: grep output
+ ```
+
+ **Commit**: YES (groups with Task 3)
+ - Message: `feat(memory): add core memory skill and MCP config reference`
+ - Files: `skills/memory/SKILL.md`, `skills/memory/references/mcp-config.md`
+ - Repo: `~/p/AI/AGENTS`
+
+---
+
+- [x] 5. Update Mem0 Memory Skill
+
+ **What to do**:
+ - Add "Memory Categories" section after Identity Scopes (line ~109):
+ - Table: category name, definition, Obsidian path, example
+ - Metadata pattern for categories: `{"category": "preference", "source": "explicit|auto-capture"}`
+ - Add "Dual-Layer Sync" section after Workflow Patterns:
+    - After storing to Mem0, also create Obsidian note in `80-memory/<category>/`
+ - Include mem0_id from response in Obsidian note frontmatter
+ - Include obsidian_ref path in Mem0 metadata via update
+ - Add "Health Check" workflow: Check `/health` before any memory operations
+ - Add "Error Handling" section: What to do when Mem0 is unavailable
+
+ **Must NOT do**:
+ - Do NOT delete existing content
+ - Do NOT change the YAML frontmatter description (triggers)
+ - Do NOT change existing API endpoint documentation
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - Reason: Adding sections to existing well-structured file
+ - **Skills**: [`mem0-memory`]
+ - `mem0-memory`: Existing skill patterns to extend
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 2 (with Tasks 2, 4, 6)
+ - **Blocks**: Task 9
+ - **Blocked By**: Task 1
+
+ **References**:
+
+ - `/home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md` — Full file: current content to extend (preserve ALL existing content)
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Verify categories added to mem0-memory skill
+ Tool: Bash
+ Steps:
+ 1. grep "Memory Categories" /home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md
+ 2. grep "preference" /home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md
+ 3. grep "Dual-Layer" /home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md
+ 4. grep "80-memory" /home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md
+ Expected Result: New sections present alongside existing content
+ Evidence: grep output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(mem0-memory): add memory categories and dual-layer sync patterns`
+ - Files: `skills/mem0-memory/SKILL.md`
+ - Repo: `~/p/AI/AGENTS`
+
+---
+
+- [x] 6. Update Obsidian Skill
+
+ **What to do**:
+ - Add "Memory Folder Conventions" section (after Best Practices, ~line 228):
+ - Document `80-memory/` structure with 5 subfolders
+ - Memory note naming: kebab-case (e.g., `prefers-dark-mode.md`)
+ - Required frontmatter fields for memory notes (type, category, mem0_id, etc.)
+ - Add "Memory Note Workflows" section:
+ - Create memory note: POST to vault REST API with memory template content
+ - Read memory note: GET with path encoding for `80-memory/` paths
+ - Search memories: Search within `80-memory/` path filter
+ - Update Integration table to include memory skill handoff
+
+ **Must NOT do**:
+ - Do NOT change existing content or workflows
+ - Do NOT modify the YAML frontmatter
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: [`obsidian`]
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 2
+ - **Blocks**: Task 9
+ - **Blocked By**: Task 1
+
+ **References**:
+ - `/home/m3tam3re/p/AI/AGENTS/skills/obsidian/SKILL.md` — Full file: current content to extend
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Verify memory conventions added to obsidian skill
+ Tool: Bash
+ Steps:
+ 1. grep "Memory Folder" /home/m3tam3re/p/AI/AGENTS/skills/obsidian/SKILL.md
+ 2. grep "80-memory" /home/m3tam3re/p/AI/AGENTS/skills/obsidian/SKILL.md
+ 3. grep "mem0_id" /home/m3tam3re/p/AI/AGENTS/skills/obsidian/SKILL.md
+ Expected Result: Memory folder docs and frontmatter patterns present
+ Evidence: grep output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(obsidian): add memory folder conventions and workflows`
+ - Files: `skills/obsidian/SKILL.md`
+ - Repo: `~/p/AI/AGENTS`
+
+---
+
+- [x] 7. Update Apollo Agent Prompt
+
+ **What to do**:
+ - Add "Memory Management" to Core Responsibilities list (after item 4):
+ - Store memories in dual-layer system (Mem0 + Obsidian CODEX)
+ - Retrieve memories via semantic search (Mem0)
+ - Auto-capture session insights at session end (max 3, confirm with user)
+ - Handle explicit "remember this" requests
+ - Inject relevant memories into context on session start
+ - Add memory-related tools to Tool Usage section
+ - Add memory error handling to Edge Cases
+
+ **Must NOT do**:
+ - Do NOT remove existing responsibilities
+ - Do NOT change Apollo's identity or boundaries
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 3 (with Task 8)
+ - **Blocks**: Task 9
+ - **Blocked By**: Task 4
+
+ **References**:
+ - `/home/m3tam3re/p/AI/AGENTS/prompts/apollo.txt` — Full file (47 lines): current prompt to extend
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Verify memory management added to Apollo prompt
+ Tool: Bash
+ Steps:
+ 1. grep -i "memory" /home/m3tam3re/p/AI/AGENTS/prompts/apollo.txt | wc -l
+ 2. grep "Mem0" /home/m3tam3re/p/AI/AGENTS/prompts/apollo.txt
+ 3. grep "auto-capture" /home/m3tam3re/p/AI/AGENTS/prompts/apollo.txt
+ Expected Result: Multiple memory references, Mem0 mentioned, auto-capture documented
+ Evidence: grep output
+ ```
+
+ **Commit**: YES (groups with Task 8)
+ - Message: `feat(agents): add memory management to Apollo prompt and user profile`
+ - Files: `prompts/apollo.txt`, `context/profile.md`
+ - Repo: `~/p/AI/AGENTS`
+
+---
+
+- [x] 8. Update User Context Profile
+
+ **What to do**:
+ - Add "Memory System" section to `context/profile.md`:
+ - Mem0 endpoint: `http://localhost:8000`
+ - Mem0 user_id: `m3tam3re` (or whatever the user's ID should be)
+ - Obsidian vault path: `~/CODEX`
+ - Memory folder: `80-memory/`
+ - Auto-capture: enabled, max 3 per session
+ - Auto-recall: enabled, top 5 results, score threshold 0.7
+ - Memory categories: preference, fact, decision, entity, other
+ - Obsidian MCP server: cyanheads/obsidian-mcp-server (see skills/memory/references/mcp-config.md)
+
+ **Must NOT do**:
+ - Do NOT remove existing profile content
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 3 (with Task 7)
+ - **Blocks**: Task 9
+ - **Blocked By**: Task 4
+
+ **References**:
+ - `/home/m3tam3re/p/AI/AGENTS/context/profile.md` — Current profile to extend
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Verify memory config in profile
+ Tool: Bash
+ Steps:
+ 1. grep "Memory System" /home/m3tam3re/p/AI/AGENTS/context/profile.md
+ 2. grep "localhost:8000" /home/m3tam3re/p/AI/AGENTS/context/profile.md
+ 3. grep "80-memory" /home/m3tam3re/p/AI/AGENTS/context/profile.md
+ 4. grep "auto-capture" /home/m3tam3re/p/AI/AGENTS/context/profile.md
+ Expected Result: Memory system section with all config values
+ Evidence: grep output
+ ```
+
+ **Commit**: YES (groups with Task 7)
+ - Message: `feat(agents): add memory management to Apollo prompt and user profile`
+ - Files: `prompts/apollo.txt`, `context/profile.md`
+ - Repo: `~/p/AI/AGENTS`
+
+---
+
+- [x] 9. End-to-End Validation
+
+ **What to do**:
+ - Verify ALL files exist and contain expected content
+ - Run skill validation: `./scripts/test-skill.sh memory`
+ - Test Mem0 availability: `curl http://localhost:8000/health`
+  - Test Obsidian REST API: `curl http://127.0.0.1:27123/vault-info`
+ - Verify CODEX vault structure: `ls -la ~/CODEX/80-memory/`
+ - Verify template: `cat ~/CODEX/templates/memory.md | head -20`
+ - Check all YAML frontmatter valid across new/updated skill files
+
+ **Must NOT do**:
+ - Do NOT create automated test infrastructure
+ - Do NOT modify any files — validation only
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-low`
+ - Reason: Verification only, running commands and checking outputs
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO
+ - **Parallel Group**: Wave 4 (final, sequential)
+ - **Blocks**: None (final task)
+ - **Blocked By**: ALL tasks (1-8)
+
+ **Acceptance Criteria**:
+
+ ```
+ Scenario: Full file existence check
+ Tool: Bash
+ Steps:
+ 1. test -f ~/p/AI/AGENTS/skills/memory/SKILL.md
+ 2. test -f ~/p/AI/AGENTS/skills/memory/references/mcp-config.md
+ 3. test -d ~/CODEX/80-memory/preferences
+ 4. test -f ~/CODEX/templates/memory.md
+ 5. grep "80-memory" ~/CODEX/AGENTS.md
+ 6. grep "#memory" ~/CODEX/tag-taxonomy.md
+ 7. grep "80-memory" ~/CODEX/README.md
+ 8. grep -i "memory" ~/p/AI/AGENTS/prompts/apollo.txt
+ 9. grep "Memory System" ~/p/AI/AGENTS/context/profile.md
+ Expected Result: All checks pass (exit code 0)
+ Evidence: Shell output captured
+
+ Scenario: Mem0 health check
+ Tool: Bash
+ Preconditions: Mem0 server must be running
+ Steps:
+ 1. curl -s -o /dev/null -w "%{http_code}" http://localhost:8000/health
+ Expected Result: HTTP 200
+ Evidence: Status code captured
+ Note: If Mem0 not running, this test will fail — spin up Mem0 first
+
+ Scenario: Obsidian REST API check
+ Tool: Bash
+ Preconditions: Obsidian desktop app must be running with Local REST API plugin
+ Steps:
+      1. curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:27123/vault-info
+ Expected Result: HTTP 200
+ Evidence: Status code captured
+ Note: Requires Obsidian desktop app to be open
+
+ Scenario: Skill validation
+ Tool: Bash
+ Steps:
+ 1. cd ~/p/AI/AGENTS && ./scripts/test-skill.sh memory
+ Expected Result: Validation passes (no errors)
+ Evidence: Script output captured
+ ```
+
+ **Commit**: NO (validation only, no file changes)
+
+---
+
+## Commit Strategy
+
+| After Task | Message | Files | Repo | Verification |
+|------------|---------|-------|------|--------------|
+| 1 | `feat(vault): add 80-memory folder structure and memory template` | 80-memory/, templates/memory.md, tag-taxonomy.md | ~/CODEX | ls + grep |
+| 2 | `docs(vault): add 80-memory documentation to AGENTS.md and README.md` | AGENTS.md, README.md | ~/CODEX | grep |
+| 3+4 | `feat(memory): add core memory skill and MCP config reference` | skills/memory/SKILL.md, skills/memory/references/mcp-config.md | ~/p/AI/AGENTS | test-skill.sh |
+| 5 | `feat(mem0-memory): add memory categories and dual-layer sync patterns` | skills/mem0-memory/SKILL.md | ~/p/AI/AGENTS | grep |
+| 6 | `feat(obsidian): add memory folder conventions and workflows` | skills/obsidian/SKILL.md | ~/p/AI/AGENTS | grep |
+| 7+8 | `feat(agents): add memory management to Apollo prompt and user profile` | prompts/apollo.txt, context/profile.md | ~/p/AI/AGENTS | grep |
+
+**Note**: Two different git repos! CODEX and AGENTS commits are independent.
+
+---
+
+## Success Criteria
+
+### Verification Commands
+```bash
+# CODEX vault structure
+ls ~/CODEX/80-memory/ # Expected: preferences/ facts/ decisions/ entities/ other/
+cat ~/CODEX/templates/memory.md | head -5 # Expected: ---\ntype: memory
+grep "#memory" ~/CODEX/tag-taxonomy.md # Expected: #memory/* tags
+
+# AGENTS skill validation
+cd ~/p/AI/AGENTS && ./scripts/test-skill.sh memory # Expected: pass
+
+# Infrastructure (requires services running)
+curl -s http://localhost:8000/health # Expected: 200
+curl -s http://127.0.0.1:27123/vault-info # Expected: 200
+```
+
+### Final Checklist
+- [x] All "Must Have" present (dual-layer, auto-capture, auto-recall, categories, health checks, error handling)
+- [x] All "Must NOT Have" absent (no citation system, no deletion, no dashboards, no unit tests)
+- [x] CODEX commits pushed (vault structure + docs)
+- [x] AGENTS commits pushed (skills + prompts + profile)
+- [x] User reminded to add Obsidian MCP to Nix config and run `home-manager switch`
+- [x] User reminded to spin up Mem0 server before using memory features
diff --git a/.sisyphus/plans/opencode-memory.md b/.sisyphus/plans/opencode-memory.md
new file mode 100644
index 0000000..f461cd1
--- /dev/null
+++ b/.sisyphus/plans/opencode-memory.md
@@ -0,0 +1,1634 @@
+# Opencode Memory Plugin — Hybrid Memory System
+
+## TL;DR
+
+> **Quick Summary**: Build `opencode-memory`, a standalone Opencode plugin that replaces mem0+qdrant with a unified SQLite-based hybrid memory system. Indexes markdown files from the user's Obsidian vault (`~/CODEX/80-memory/`) and Opencode session transcripts into a SQLite database with FTS5 (BM25 keyword search) and vec0 (vector cosine similarity). Provides auto-recall on session start, auto-capture on session idle, and three agent tools (memory_search, memory_store, memory_get). Architecture inspired by Openclaw's battle-tested 1590-line MemoryIndexManager.
+>
+> **Deliverables**:
+> - Standalone TypeScript git repo: `opencode-memory/`
+> - Opencode plugin with session.created, session.idle, session.compacting hooks
+> - Three custom tools: memory_search (hybrid query), memory_store (save markdown + index), memory_get (read specific file/lines)
+> - SQLite database with FTS5 + vec0 extensions for hybrid search
+> - OpenAI text-embedding-3 integration with content-hash caching
+> - Session transcript indexer reading Opencode's JSON storage format
+> - Full TDD test suite (bun test)
+> - Updated AGENTS repo skills (memory, mem0-memory deprecation notes)
+>
+> **Estimated Effort**: Large
+> **Parallel Execution**: YES — 5 waves
+> **Critical Path**: Task 1 → 2 → 4 → 6 → 8 → 10 → 12
+
+---
+
+## Context
+
+### Original Request
+"I want to implement a memory system for my Opencode Agent. A project named Openclaw has a very nice memory system and I would like to make something similar." User has mem0+qdrant running with Obsidian vault integration. Wants persistent, reliable memory with hybrid search. Open to replacing the existing architecture if something better exists.
+
+### Interview Summary
+**Key Discussions**:
+- **Architecture**: User chose full SQLite replacement (drop mem0) — the most reliable approach. Single source of truth (markdown), derived index (SQLite).
+- **Embedding Provider**: OpenAI text-embedding-3 (user's explicit choice over Gemini and local).
+- **Plugin Location**: Separate git repo (not in AGENTS repo). Own npm package/Nix input.
+- **Test Strategy**: TDD with bun test. New repo needs full test infrastructure setup.
+- **Session Indexing**: Yes, full transcripts. Read from `~/.local/share/opencode/storage/`.
+- **Deployment**: Global via Nix home-manager. Plugin registered in `opencode.json`.
+
+**Research Findings**:
+- **Openclaw architecture**: SQLite + FTS5 + vec0. MemoryIndexManager (1590 lines) handles file watching, chunking (tiktoken, 400 tokens/80 overlap), embedding (multi-provider), hybrid scoring (0.7 vector + 0.3 BM25). Two sources (memory files + session transcripts). Two tools (search + get).
+- **Opencode plugin API**: JS/TS modules with event hooks. Key events: session.created, session.idle, session.compacted, experimental.session.compacting. Plugin context: { project, client, $, directory, worktree }. Custom tools via tool() helper with Zod schemas.
+- **Opencode session storage**: JSON at `~/.local/share/opencode/storage/`. Sessions in `session/{project_hash}/ses_*.json`. Messages in `message/{session_id}/msg_*.json`. Fields: id, sessionID, role, agent, model, timestamps.
+- **User's opencode config**: 3 existing plugins (oh-my-opencode, opencode-beads, opencode-antigravity-auth@beta). 6 agents. Google/Antigravity provider. Nix deployment.
+
+### Metis Review
+**Identified Gaps** (all addressed):
+- **vec0 availability**: Added verification step in Task 1. Fallback strategy if unavailable.
+- **SQLite concurrency**: WAL mode + single write queue. Addressed in Task 2.
+- **Embedding failure handling**: try/catch + queue + retry + graceful degradation. Addressed in Task 4 and Task 12.
+- **Token budget for injection**: Hard limit 2000 tokens. Configurable. Addressed in Task 10.
+- **Index rebuild**: `--rebuild` command via CLI entry point. Addressed in Task 12.
+- **File sync conflicts**: Atomic writes (temp file + rename). Addressed in Task 5.
+- **Deduplication/expiration**: Deferred to Phase 2. Scope locked.
+- **Multi-project scope**: Global search by default. Configurable later. Phase 2.
+
+---
+
+## Work Objectives
+
+### Core Objective
+Build a standalone Opencode plugin that provides persistent, reliable, hybrid (vector + keyword) memory for all agent sessions, powered by SQLite+FTS5+vec0 over Obsidian markdown files.
+
+### Concrete Deliverables
+- `opencode-memory/` — Standalone TypeScript repo with bun
+- `src/index.ts` — Opencode plugin entry point (hooks + tools)
+- `src/config.ts` — Configuration module (paths, defaults, overrides)
+- `src/db.ts` — SQLite database initialization + schema + migrations
+- `src/discovery.ts` — Markdown file discovery + text chunking
+- `src/embeddings.ts` — OpenAI embedding provider + content-hash cache
+- `src/indexer.ts` — File indexer (file → chunks → embeddings → SQLite)
+- `src/search.ts` — Hybrid search engine (FTS5 BM25 + vec0 cosine)
+- `src/sessions.ts` — Opencode session transcript parser + indexer
+- `src/tools.ts` — Agent tools (memory_search, memory_store, memory_get)
+- `src/types.ts` — Shared TypeScript types
+- Full test suite in `src/__tests__/` (TDD, bun test)
+- Updated AGENTS repo: `skills/memory/SKILL.md` + deprecation notes
+
+### Definition of Done
+- [ ] `bun test` passes all tests (0 failures)
+- [ ] Plugin loads in Opencode without errors
+- [ ] `memory_search` returns hybrid results from vault + session transcripts
+- [ ] `memory_store` creates markdown file + indexes it
+- [ ] `memory_get` reads specific file/line ranges
+- [ ] Auto-recall injects relevant memories on session.created
+- [ ] Auto-capture stores conversation insights on session.idle
+- [ ] Embedding cache avoids re-embedding unchanged content
+- [ ] SQLite can be rebuilt from markdown files alone (`--rebuild`)
+- [ ] Plugin fails gracefully (no crashes) when OpenAI is unavailable
+
+### Must Have
+- Hybrid search (vector 0.7 + BM25 0.3 weights, configurable)
+- OpenAI text-embedding-3 with content-hash caching
+- Markdown source of truth at `~/CODEX/80-memory/`
+- SQLite derived index at `~/.local/share/opencode-memory/index.db`
+- Session transcript indexing from Opencode storage
+- Graceful degradation on API/DB failures
+- WAL mode for SQLite concurrent reads
+- Atomic markdown writes (temp file + rename)
+- Configurable chunk size (default 400 tokens, 80 overlap)
+- Token budget limit for memory injection (default 2000 tokens)
+
+### Must NOT Have (Guardrails)
+- **MUST NOT** block session operations if memory system fails — degraded mode > broken sessions
+- **MUST NOT** exceed configurable token budget (default 2000) for memory context injection
+- **MUST NOT** write files outside `~/CODEX/80-memory/` directory
+- **MUST NOT** depend on Obsidian REST API — filesystem only
+- **MUST NOT** depend on mem0 or qdrant — fully standalone
+- **MUST NOT** implement memory deduplication (Phase 2)
+- **MUST NOT** implement memory expiration/archival (Phase 2)
+- **MUST NOT** implement memory graph/relationships (Phase 2)
+- **MUST NOT** support multiple vaults (Phase 2)
+- **MUST NOT** implement additional embedding providers beyond OpenAI (Phase 2)
+- **MUST NOT** implement admin CLI/dashboard UI (Phase 2)
+- **MUST NOT** auto-summarize memories (Phase 2)
+- **MUST NOT** store embedding vectors in markdown files — SQLite only
+- **MUST NOT** hard-code paths — use config with sensible defaults
+
+---
+
+## Verification Strategy (MANDATORY)
+
+> **UNIVERSAL RULE: ZERO HUMAN INTERVENTION**
+>
+> ALL tasks in this plan MUST be verifiable WITHOUT any human action.
+> This is NOT conditional — it applies to EVERY task, regardless of test strategy.
+>
+> **FORBIDDEN** — acceptance criteria that require:
+> - "User manually tests..." / "User visually confirms..."
+> - "User interacts with..." / "Ask user to verify..."
+> - ANY step where a human must perform an action
+>
+> **ALL verification is executed by the agent** using tools (Bash, interactive_bash, etc.). No exceptions.
+
+### Test Decision
+- **Infrastructure exists**: NO (new repo, needs setup)
+- **Automated tests**: TDD (RED-GREEN-REFACTOR)
+- **Framework**: bun test (built into bun runtime)
+
+### TDD Workflow
+
+Each TODO follows RED-GREEN-REFACTOR:
+
+**Task Structure:**
+1. **RED**: Write failing test first
+ - Test file: `src/__tests__/{module}.test.ts`
+ - Test command: `bun test src/__tests__/{module}.test.ts`
+ - Expected: FAIL (test exists, implementation doesn't)
+2. **GREEN**: Implement minimum code to pass
+ - Command: `bun test src/__tests__/{module}.test.ts`
+ - Expected: PASS
+3. **REFACTOR**: Clean up while keeping green
+ - Command: `bun test`
+ - Expected: PASS (all tests still green)
+
+### Agent-Executed QA Scenarios (MANDATORY — ALL tasks)
+
+> Whether TDD is enabled or not, EVERY task MUST include Agent-Executed QA Scenarios.
+> With TDD: QA scenarios complement unit tests at integration/E2E level.
+
+**Verification Tool by Deliverable Type:**
+
+| Type | Tool | How Agent Verifies |
+|------|------|-------------------|
+| TypeScript modules | Bash (bun test) | Run unit tests, check pass/fail |
+| SQLite operations | Bash (bun run) | Execute script, inspect DB with sqlite3 CLI |
+| Plugin integration | interactive_bash (tmux) | Load plugin in opencode, verify hooks fire |
+| File I/O | Bash | Create/read/delete files, verify filesystem state |
+| API integration | Bash (bun run) | Call OpenAI, verify embedding dimensions |
+
+---
+
+## Execution Strategy
+
+### Parallel Execution Waves
+
+```
+Wave 1 (Start Immediately):
+└── Task 1: Repo scaffold + test infrastructure + vec0 verification
+
+Wave 2 (After Wave 1):
+├── Task 2: Configuration module
+├── Task 3: SQLite schema + database module
+└── Task 4: Markdown file discovery + text chunking
+
+Wave 3 (After Wave 2):
+├── Task 5: Embedding provider + cache (depends: 3)
+├── Task 6: File indexer pipeline (depends: 3, 4, 5)
+└── Task 7: Session transcript parser (depends: 3, 4)
+
+Wave 4 (After Wave 3):
+├── Task 8: FTS5 BM25 search (depends: 6)
+├── Task 9: Vector search (depends: 6)
+└── Task 10: Hybrid search combiner (depends: 8, 9)
+
+Wave 5 (After Wave 4):
+├── Task 11: Agent tools — memory_search, memory_store, memory_get (depends: 10, 7)
+└── Task 12: Plugin entry point — hooks + lifecycle (depends: 11)
+
+Wave 6 (After Wave 5):
+├── Task 13: Integration testing + error handling + rebuild command (depends: 12)
+└── Task 14: AGENTS repo skill updates + deployment config (depends: 13)
+```
+
+### Dependency Matrix
+
+| Task | Depends On | Blocks | Can Parallelize With |
+|------|------------|--------|---------------------|
+| 1 | None | 2, 3, 4 | None (foundation) |
+| 2 | 1 | 5, 6, 7, 8, 9, 10, 11, 12 | 3, 4 |
+| 3 | 1 | 5, 6, 7, 8, 9 | 2, 4 |
+| 4 | 1 | 6, 7 | 2, 3 |
+| 5 | 3 | 6 | 7 |
+| 6 | 3, 4, 5 | 8, 9 | None |
+| 7 | 3, 4 | 11 | 5 |
+| 8 | 6 | 10 | 9 |
+| 9 | 6 | 10 | 8 |
+| 10 | 8, 9 | 11 | None |
+| 11 | 10, 7 | 12 | None |
+| 12 | 11 | 13 | None |
+| 13 | 12 | 14 | None |
+| 14 | 13 | None | None |
+
+### Agent Dispatch Summary
+
+| Wave | Tasks | Recommended Agents |
+|------|-------|-------------------|
+| 1 | 1 | task(category="unspecified-high", load_skills=[], run_in_background=false) |
+| 2 | 2, 3, 4 | dispatch 3 parallel tasks after Wave 1 |
+| 3 | 5, 6, 7 | sequential: 5 then 6 (depends on 5); 7 parallel with 5 |
+| 4 | 8, 9, 10 | 8 and 9 parallel; 10 after both |
+| 5 | 11, 12 | sequential |
+| 6 | 13, 14 | sequential (14 is in AGENTS repo, different workdir) |
+
+---
+
+## TODOs
+
+- [x] 1. Initialize Repository Scaffold + Test Infrastructure
+
+ **What to do**:
+ - Create new git repo `opencode-memory/` at `~/p/AI/opencode-memory/`
+ - Initialize with `bun init`
+ - Install dependencies: `better-sqlite3`, `openai`, `tiktoken`, `chokidar`, `zod`
+ - Install dev dependencies: `@types/better-sqlite3`, `typescript`
+ - Create `tsconfig.json` (target ES2022, module ESNext, strict mode, paths alias)
+ - Create `src/` directory structure:
+ ```
+ src/
+ ├── __tests__/
+ ├── index.ts (plugin entry — stub)
+ ├── config.ts (stub)
+ ├── db.ts (stub)
+ ├── discovery.ts (stub)
+ ├── embeddings.ts (stub)
+ ├── indexer.ts (stub)
+ ├── search.ts (stub)
+ ├── sessions.ts (stub)
+ ├── tools.ts (stub)
+ └── types.ts (stub)
+ ```
+ - Verify `bun test` runs (create example test)
+ - **CRITICAL**: Verify SQLite vec0 extension availability:
+    - Try: `import Database from 'better-sqlite3'; const db = new Database(':memory:'); db.loadExtension('vec0')` or check if the `sqlite-vec` npm package works
+ - If vec0 unavailable: document findings, check `sqlite-vec` npm package as alternative, or plan for `@anthropic-ai/sdk` vector operations
+ - This is a blocking verification — if vec0 doesn't work, architecture needs adjustment
+ - Create `.gitignore` (node_modules, dist, *.db, .env)
+ - Create `package.json` with `"type": "module"`, scripts for test/build
+
+ **Must NOT do**:
+ - Don't implement any real logic — stubs only
+ - Don't configure Nix packaging yet (Task 14)
+ - Don't create README or documentation files
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: Repo scaffolding with critical platform verification (vec0). Not purely visual or algorithmic, but requires careful setup.
+ - **Skills**: none needed
+ - **Skills Evaluated but Omitted**:
+ - `frontend-ui-ux`: No UI involved
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (foundation task)
+ - **Parallel Group**: Wave 1 (solo)
+ - **Blocks**: Tasks 2, 3, 4
+ - **Blocked By**: None
+
+ **References**:
+
+ **Pattern References** (existing code to follow):
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:1-50` — Imports and dependency list (shows what Openclaw uses: better-sqlite3, tiktoken, chokidar, etc.)
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/types.ts` — TypeScript type definitions for memory system
+
+ **API/Type References**:
+ - Opencode plugin structure: `export default function(ctx) { ... }` — see Opencode plugin docs
+
+ **External References**:
+ - SQLite vec0: `https://github.com/asg017/sqlite-vec` — vec0 extension for vector search in SQLite
+ - better-sqlite3: `https://github.com/WiseLibs/better-sqlite3` — Synchronous SQLite3 for Node.js
+ - Opencode plugin docs: `https://opencode.ai/docs/plugins/` — Plugin API and lifecycle
+
+ **Acceptance Criteria**:
+
+ **TDD (setup verification):**
+ - [ ] `bun test` runs and passes at least 1 example test
+ - [ ] `better-sqlite3` imports successfully
+ - [ ] vec0 extension loads or alternative documented
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Repo initializes and tests pass
+ Tool: Bash
+ Preconditions: ~/p/AI/opencode-memory/ does not exist
+ Steps:
+ 1. ls ~/p/AI/opencode-memory/ → should not exist
+ 2. After task: ls ~/p/AI/opencode-memory/src/ → should list all stub files
+ 3. bun test (in opencode-memory/) → 1 test passes, 0 failures
+ 4. bun run -e "import Database from 'better-sqlite3'; const db = new Database(':memory:'); console.log('SQLite OK:', db.pragma('journal_mode', { simple: true }))"
+ → prints "SQLite OK: memory" (or "wal")
+ Expected Result: Repo exists, tests pass, SQLite works
+ Evidence: Terminal output captured
+
+ Scenario: vec0 extension availability check
+ Tool: Bash
+ Preconditions: opencode-memory/ initialized with better-sqlite3
+ Steps:
+ 1. bun run -e "import Database from 'better-sqlite3'; const db = new Database(':memory:'); try { db.loadExtension('vec0'); console.log('vec0: AVAILABLE') } catch(e) { console.log('vec0: NOT AVAILABLE -', e.message) }"
+ 2. If NOT AVAILABLE: try `bun add sqlite-vec` and test with that package's loading mechanism
+ 3. Document result in src/db.ts as comment
+ Expected Result: vec0 status determined (available or alternative found)
+ Evidence: Terminal output + documented in code comment
+ ```
+
+ **Commit**: YES
+ - Message: `feat(scaffold): initialize opencode-memory repo with test infrastructure`
+ - Files: all scaffold files
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 2. Configuration Module
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/config.test.ts`:
+ - Test: default config returns valid paths for vault, db, session storage
+ - Test: config overrides work (custom vault path, custom db path)
+ - Test: config validates paths (vault must be absolute)
+ - Test: config has correct defaults for chunk size (400), overlap (80), weights (0.7/0.3), minScore (0.35), maxResults (6), tokenBudget (2000)
+ - **GREEN**: Implement `src/config.ts`:
+ - Define `MemoryConfig` interface with all configuration fields
+ - Default vault path: `~/CODEX/80-memory/`
+ - Default DB path: `~/.local/share/opencode-memory/index.db`
+ - Default session path: `~/.local/share/opencode/storage/`
+ - Chunking: `{ tokens: 400, overlap: 80 }`
+ - Search: `{ vectorWeight: 0.7, textWeight: 0.3, minScore: 0.35, maxResults: 6 }`
+ - Embedding: `{ model: "text-embedding-3-small", dimensions: 1536 }`
+ - TokenBudget: `{ maxInjectTokens: 2000 }`
+    - `resolveConfig(overrides?: Partial<MemoryConfig>): MemoryConfig` — merges overrides with defaults, expands `~` to `$HOME`
+ - **REFACTOR**: Extract types to `src/types.ts`
+
+ **Must NOT do**:
+ - Don't read from config files on disk (hardcoded defaults + programmatic overrides)
+ - Don't implement environment variable loading (keep simple)
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - Reason: Small, focused module. Config is straightforward.
+ - **Skills**: none
+ - **Skills Evaluated but Omitted**:
+ - All: This is a simple data structure + defaults task
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 2 (with Tasks 3, 4)
+ - **Blocks**: Tasks 5, 6, 7, 8, 9, 10, 11, 12
+ - **Blocked By**: Task 1
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/agents/memory-search.ts` — Openclaw's config resolution pattern (defaults + overrides)
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/backend-config.ts` — Backend configuration with defaults
+
+ **API/Type References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/types.ts` — Config type definitions to adapt
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/config.test.ts`
+ - [ ] `bun test src/__tests__/config.test.ts` → PASS (all config tests green)
+ - [ ] Default config has all required fields with correct values
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Default config returns correct values
+ Tool: Bash (bun test)
+ Preconditions: Task 1 complete, repo initialized
+ Steps:
+ 1. bun test src/__tests__/config.test.ts
+ 2. Assert: all tests pass
+ 3. Assert: default vault path ends with "CODEX/80-memory"
+ 4. Assert: chunk tokens = 400, overlap = 80
+ 5. Assert: vector weight = 0.7, text weight = 0.3
+ Expected Result: All config defaults correct
+ Evidence: Test output captured
+ ```
+
+ **Commit**: YES (groups with 3, 4)
+ - Message: `feat(config): add configuration module with sensible defaults`
+ - Files: `src/config.ts`, `src/types.ts`, `src/__tests__/config.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 3. SQLite Schema + Database Module
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/db.test.ts`:
+ - Test: `initDatabase(":memory:")` creates all tables (meta, files, chunks, embedding_cache, chunks_fts, chunks_vec)
+ - Test: `meta` table stores schema version
+ - Test: `files` table accepts inserts with (path, source, hash, indexed_at)
+ - Test: `chunks` table accepts inserts with (id, file_path, start_line, end_line, content_hash, model, text, embedding BLOB)
+ - Test: `embedding_cache` table stores (content_hash, model, embedding BLOB, created_at)
+ - Test: FTS5 virtual table `chunks_fts` is searchable
+ - Test: vec0 virtual table `chunks_vec` is searchable (or skip if vec0 unavailable — see Task 1 findings)
+ - Test: WAL mode is enabled
+ - Test: `closeDatabase()` closes cleanly
+ - **GREEN**: Implement `src/db.ts`:
+ - `initDatabase(dbPath: string): Database` — creates/opens SQLite, runs schema, enables WAL
+ - Schema (following Openclaw's `memory-schema.ts`):
+ ```sql
+ CREATE TABLE IF NOT EXISTS meta (key TEXT PRIMARY KEY, value TEXT);
+ CREATE TABLE IF NOT EXISTS files (path TEXT PRIMARY KEY, source TEXT NOT NULL, hash TEXT NOT NULL, indexed_at INTEGER NOT NULL);
+ CREATE TABLE IF NOT EXISTS chunks (id TEXT PRIMARY KEY, file_path TEXT NOT NULL REFERENCES files(path), start_line INTEGER, end_line INTEGER, content_hash TEXT NOT NULL, model TEXT NOT NULL, text TEXT NOT NULL, embedding BLOB);
+ CREATE TABLE IF NOT EXISTS embedding_cache (content_hash TEXT NOT NULL, model TEXT NOT NULL, embedding BLOB NOT NULL, created_at INTEGER NOT NULL, PRIMARY KEY (content_hash, model));
+ CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(text, content='chunks', content_rowid='rowid');
+ CREATE VIRTUAL TABLE IF NOT EXISTS chunks_vec USING vec0(embedding float[1536]);
+ ```
+ - Store schema version in meta table
+ - Enable WAL mode: `PRAGMA journal_mode=WAL`
+ - Enable foreign keys: `PRAGMA foreign_keys=ON`
+ - Load vec0 extension (or handle unavailability gracefully)
+ - **REFACTOR**: Add helper functions for common DB operations
+
+ **Must NOT do**:
+ - Don't implement migration logic (v1 schema only)
+ - Don't add indexes beyond what schema requires (premature optimization)
+ - Don't implement any search logic (Task 8, 9)
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: SQLite schema with extensions (FTS5, vec0) requires careful handling. Extension loading may need platform-specific workarounds.
+ - **Skills**: none
+ - **Skills Evaluated but Omitted**:
+ - All: Pure database schema work, no domain-specific skill needed
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 2 (with Tasks 2, 4)
+ - **Blocks**: Tasks 5, 6, 7, 8, 9
+ - **Blocked By**: Task 1 (needs vec0 findings)
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/memory-schema.ts` — EXACT schema to follow (adapt table names/columns). This is the primary reference — copy the structure closely.
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:80-150` — Database initialization logic, WAL mode, extension loading
+
+ **External References**:
+ - better-sqlite3 API: `https://github.com/WiseLibs/better-sqlite3/blob/master/docs/api.md`
+ - FTS5 docs: `https://www.sqlite.org/fts5.html`
+ - vec0 docs: `https://alexgarcia.xyz/sqlite-vec/`
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/db.test.ts`
+ - [ ] `bun test src/__tests__/db.test.ts` → PASS
+  - [ ] All 9 schema tests pass
+ - [ ] WAL mode enabled (verified via PRAGMA)
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Database creates all tables and extensions
+ Tool: Bash (bun run)
+ Preconditions: Task 1 complete
+ Steps:
+ 1. bun test src/__tests__/db.test.ts
+ 2. Assert: all tests pass
+ 3. bun run -e "import { initDatabase } from './src/db'; const db = initDatabase(':memory:'); console.log(db.pragma('journal_mode', {simple:true})); console.log(JSON.stringify(db.prepare('SELECT name FROM sqlite_master WHERE type=\"table\"').all()))"
+ 4. Assert: journal_mode = "wal"
+ 5. Assert: tables include "meta", "files", "chunks", "embedding_cache"
+ Expected Result: Schema created correctly with WAL mode
+ Evidence: Terminal output captured
+
+ Scenario: FTS5 virtual table is functional
+ Tool: Bash (bun run)
+ Preconditions: Database module implemented
+ Steps:
+ 1. Create in-memory db, insert test chunk with text "TypeScript is my preferred language"
+ 2. Query: SELECT * FROM chunks_fts WHERE chunks_fts MATCH 'TypeScript'
+ 3. Assert: 1 result returned
+ Expected Result: FTS5 search returns matching content
+ Evidence: Terminal output captured
+ ```
+
+ **Commit**: YES (groups with 2, 4)
+ - Message: `feat(db): SQLite schema with FTS5, vec0, WAL mode`
+ - Files: `src/db.ts`, `src/__tests__/db.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 4. Markdown File Discovery + Text Chunking
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/discovery.test.ts`:
+ - Test: `discoverFiles(vaultPath)` finds all .md files recursively
+ - Test: ignores non-.md files (images, PDFs, etc.)
+ - Test: returns absolute paths with metadata (size, mtime)
+ - Test: handles empty directory (returns [])
+ - Test: handles non-existent directory (throws descriptive error)
+ - Test: `computeFileHash(content)` returns consistent SHA-256 hex
+ - Test: `chunkText(text, { tokens: 400, overlap: 80 })` splits correctly
+ - Test: chunks have start_line and end_line metadata
+ - Test: chunks overlap correctly (last 80 tokens of chunk N = first 80 tokens of chunk N+1)
+ - Test: single small file (< 400 tokens) returns 1 chunk
+ - Test: empty file returns 0 chunks
+ - Test: chunk content_hash is stable for same content
+ - **GREEN**: Implement `src/discovery.ts`:
+ - `discoverFiles(vaultPath: string, extensions?: string[]): FileInfo[]` — recursive glob for .md files
+ - `computeFileHash(content: string): string` — SHA-256 hex hash
+ - `chunkText(text: string, config: ChunkConfig): Chunk[]` — split text by token count using tiktoken (cl100k_base encoding, matching Openclaw). Each chunk gets start_line/end_line and content_hash.
+ - `readFileContent(filePath: string): string` — read file with UTF-8, handle encoding errors
+ - **REFACTOR**: Optimize chunking for large files, ensure stable hashing
+
+ **Must NOT do**:
+ - Don't implement file watching (that's in indexer lifecycle, Task 6)
+ - Don't parse YAML frontmatter (just treat as text for now)
+ - Don't handle binary files (filter by extension)
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-low`
+ - Reason: Straightforward file I/O and text processing. No complex algorithms.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 2 (with Tasks 2, 3)
+ - **Blocks**: Tasks 6, 7
+ - **Blocked By**: Task 1
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/internal.ts` — File discovery functions, chunking logic, hash computation. This is the PRIMARY reference for chunking algorithm.
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:200-350` — How files are discovered and processed
+
+ **External References**:
+ - tiktoken: `https://github.com/openai/tiktoken` — Token counting for chunking
+ - Node.js crypto: Built-in `crypto.createHash('sha256')` for hashing
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/discovery.test.ts`
+ - [ ] `bun test src/__tests__/discovery.test.ts` → PASS (all 12 tests)
+ - [ ] Uses tiktoken cl100k_base encoding for token counting
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Discover files in test fixture directory
+ Tool: Bash
+ Preconditions: Create test fixture dir with 3 .md files and 1 .png
+ Steps:
+      1. mkdir -p /tmp/test-vault/sub && echo "# Test" > /tmp/test-vault/note1.md && echo "# Test 2" > /tmp/test-vault/note2.md && echo "deep" > /tmp/test-vault/sub/note3.md && touch /tmp/test-vault/image.png
+ 2. bun run -e "import { discoverFiles } from './src/discovery'; console.log(JSON.stringify(discoverFiles('/tmp/test-vault')))"
+ 3. Assert: 3 files returned (all .md), image.png excluded
+ Expected Result: Only .md files discovered
+ Evidence: Terminal output
+
+ Scenario: Chunk text with correct overlap
+ Tool: Bash (bun test)
+ Preconditions: Discovery module implemented
+ Steps:
+ 1. bun test src/__tests__/discovery.test.ts --filter "overlap"
+ 2. Assert: overlap test passes
+ 3. Verify chunk N end overlaps with chunk N+1 start
+ Expected Result: Chunks overlap by configured token count
+ Evidence: Test output
+ ```
+
+ **Commit**: YES (groups with 2, 3)
+ - Message: `feat(discovery): markdown file discovery and text chunking with tiktoken`
+ - Files: `src/discovery.ts`, `src/__tests__/discovery.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 5. Embedding Provider + Content-Hash Cache
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/embeddings.test.ts`:
+ - Test: `EmbeddingProvider.embed(text)` returns float array of correct dimensions (1536 for text-embedding-3-small)
+ - Test: `EmbeddingProvider.embedBatch(texts)` handles multiple texts
+ - Test: Cache stores embedding by (content_hash, model) key
+ - Test: Cache hit returns stored embedding without API call (mock API)
+ - Test: Cache miss calls API, stores result, returns embedding
+ - Test: API failure throws descriptive error (does NOT crash)
+ - Test: API rate limit triggers retry with exponential backoff (mock)
+ - Test: `embeddingToBuffer(float[])` converts to Buffer for SQLite BLOB storage
+ - Test: `bufferToEmbedding(Buffer)` converts back to float[]
+ - **GREEN**: Implement `src/embeddings.ts`:
+ - `class EmbeddingProvider`:
+ - Constructor takes `{ apiKey, model, dimensions, db }` — db for cache table
+      - `async embed(text: string): Promise<number[]>` — check cache first, then API
+      - `async embedBatch(texts: string[], hashes: string[]): Promise<number[][]>` — batch with cache check per item
+ - Cache read: `SELECT embedding FROM embedding_cache WHERE content_hash = ? AND model = ?`
+ - Cache write: `INSERT INTO embedding_cache (content_hash, model, embedding, created_at) VALUES (?, ?, ?, ?)`
+ - API call: `openai.embeddings.create({ model, input, dimensions })`
+ - Retry logic: exponential backoff (1s, 2s, 4s) on 429/500, max 3 retries
+ - `embeddingToBuffer(embedding: number[]): Buffer` — Float32Array → Buffer
+ - `bufferToEmbedding(buffer: Buffer): number[]` — Buffer → Float32Array → number[]
+ - **REFACTOR**: Extract cache logic to separate internal function
+
+ **Must NOT do**:
+ - Don't support other embedding providers (Phase 2)
+ - Don't implement local/offline fallback (Phase 2)
+ - Don't implement cost tracking
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: API integration with retry logic, binary serialization, caching. Moderate complexity.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (parallel with Task 7)
+ - **Parallel Group**: Wave 3 (first in wave — 6 depends on this)
+ - **Blocks**: Task 6
+ - **Blocked By**: Task 3 (needs db for cache table)
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/embeddings.ts` — Multi-provider embedding system. Focus on the OpenAI provider implementation and the cache logic. Copy the binary serialization (Float32Array ↔ Buffer).
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:400-500` — How embeddings are called and cached during indexing
+
+ **External References**:
+ - OpenAI Embeddings API: `https://platform.openai.com/docs/api-reference/embeddings`
+ - OpenAI npm: `https://github.com/openai/openai-node`
+
+ **WHY Each Reference Matters**:
+ - `embeddings.ts`: Exact binary serialization pattern (Float32Array ↔ Buffer) is critical for SQLite BLOB storage. Also shows retry logic.
+ - `manager.ts:400-500`: Shows how cache is checked before API call, and how batch embedding works.
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/embeddings.test.ts`
+ - [ ] `bun test src/__tests__/embeddings.test.ts` → PASS
+ - [ ] Cache hit skips API call (verified via mock)
+ - [ ] Buffer conversion round-trips correctly
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Embedding produces correct dimensions
+ Tool: Bash (bun run)
+ Preconditions: OPENAI_API_KEY set in environment
+ Steps:
+ 1. bun run -e "import { EmbeddingProvider } from './src/embeddings'; import { initDatabase } from './src/db'; const db = initDatabase(':memory:'); const ep = new EmbeddingProvider({ db, model: 'text-embedding-3-small' }); const emb = await ep.embed('test'); console.log('dimensions:', emb.length)"
+ 2. Assert: dimensions = 1536
+ Expected Result: Embedding has 1536 dimensions
+ Evidence: Terminal output
+
+ Scenario: Cache prevents duplicate API calls
+ Tool: Bash (bun test)
+ Preconditions: Embeddings module with mock
+ Steps:
+ 1. bun test src/__tests__/embeddings.test.ts --filter "cache"
+ 2. Assert: mock API called once for first embed, zero times for second (same content)
+ Expected Result: Second call uses cache
+ Evidence: Test output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(embeddings): OpenAI embedding provider with content-hash cache`
+ - Files: `src/embeddings.ts`, `src/__tests__/embeddings.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 6. File Indexer Pipeline
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/indexer.test.ts`:
+ - Test: `indexFile(filePath, source, db, embedder)` reads file, chunks it, embeds chunks, stores in SQLite
+ - Test: file hash is stored in `files` table
+ - Test: chunks are stored in `chunks` table with correct file_path, start_line, end_line
+ - Test: FTS5 table is populated with chunk text
+ - Test: vec0 table is populated with embeddings (if available)
+ - Test: re-indexing unchanged file (same hash) is a no-op
+ - Test: re-indexing changed file (different hash) replaces old chunks
+ - Test: `removeFile(filePath, db)` removes file + all its chunks from all tables
+ - Test: `indexDirectory(vaultPath, source, db, embedder)` indexes all .md files
+ - Test: `indexDirectory` skips already-indexed files with same hash
+ - Test: `indexDirectory` removes files that no longer exist on disk
+ - **GREEN**: Implement `src/indexer.ts`:
+ - `async indexFile(filePath, source, db, embedder, config)`:
+ 1. Read file content
+ 2. Compute file hash
+ 3. Check if file already indexed with same hash → skip if unchanged
+ 4. Delete old chunks for this file (if re-indexing)
+ 5. Chunk the text (using discovery.chunkText)
+ 6. Embed all chunks (using embedder.embedBatch — leverages cache)
+ 7. Insert file record into `files` table
+ 8. Insert chunks into `chunks`, `chunks_fts`, `chunks_vec` tables
+ 9. Wrap in transaction for atomicity
+ - `removeFile(filePath, db)`: Delete from files, chunks, chunks_fts, chunks_vec
+ - `async indexDirectory(vaultPath, source, db, embedder, config)`:
+ 1. Discover all .md files
+ 2. Get list of currently indexed files from DB
+ 3. Remove files from DB that no longer exist on disk
+ 4. Index new/changed files
+ 5. Skip unchanged files (hash match)
+ - **REFACTOR**: Ensure all DB operations are in transactions
+
+ **Must NOT do**:
+ - Don't implement file watching (lifecycle concern, Task 12)
+ - Don't implement session indexing (Task 7)
+ - Don't add progress reporting (Phase 2)
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: Core pipeline orchestrating discovery, embedding, and database operations. Transaction management. Most complex single task.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (depends on 3, 4, 5)
+ - **Parallel Group**: Wave 3 (after Task 5 completes)
+ - **Blocks**: Tasks 8, 9
+ - **Blocked By**: Tasks 3, 4, 5
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:350-600` — `syncFiles()` method: the EXACT pattern for indexing. Shows hash checking, chunk insertion, FTS5/vec0 population, transaction wrapping. This is the PRIMARY reference.
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:600-800` — How file removal and re-indexing works
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/indexer.test.ts`
+ - [ ] `bun test src/__tests__/indexer.test.ts` → PASS (all 11 tests)
+ - [ ] Unchanged files are skipped (hash check)
+ - [ ] Changed files replace old chunks (not append)
+ - [ ] Deleted files are removed from index
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Index a directory of markdown files
+ Tool: Bash
+ Preconditions: DB module, discovery, embeddings all working. Test fixtures exist.
+ Steps:
+ 1. Create 3 test .md files in /tmp/test-vault/
+ 2. bun run -e "import { indexDirectory } from './src/indexer'; import { initDatabase } from './src/db'; import { EmbeddingProvider } from './src/embeddings'; const db = initDatabase(':memory:'); const ep = new EmbeddingProvider({db, model:'text-embedding-3-small'}); await indexDirectory('/tmp/test-vault', 'memory', db, ep, defaultConfig); const files = db.prepare('SELECT * FROM files').all(); const chunks = db.prepare('SELECT * FROM chunks').all(); console.log('files:', files.length, 'chunks:', chunks.length)"
+      3. Assert: files = 3, chunks >= 3 (at least one chunk per file; more for longer content)
+ Expected Result: All files indexed with chunks in DB
+ Evidence: Terminal output
+
+ Scenario: Re-index unchanged files is a no-op
+ Tool: Bash
+ Preconditions: Directory already indexed
+ Steps:
+ 1. Run indexDirectory twice on same unchanged directory
+ 2. Count API calls to embedding provider (mock)
+ 3. Assert: 0 embedding API calls on second run
+ Expected Result: No re-embedding of unchanged content
+ Evidence: Test output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(indexer): file indexer pipeline with hash-based change detection`
+ - Files: `src/indexer.ts`, `src/__tests__/indexer.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 7. Session Transcript Parser + Indexer
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/sessions.test.ts`:
+ - Test: `discoverSessions(storagePath)` finds all session directories
+ - Test: `parseSession(sessionDir)` reads session JSON + message JSONs, returns structured transcript
+ - Test: `sessionToText(session)` converts to "User: ... / Assistant: ..." text format
+ - Test: handles session with 0 messages (returns empty text)
+ - Test: handles corrupted/missing JSON files gracefully (skip, don't crash)
+ - Test: `indexSessions(storagePath, db, embedder, config)` indexes all session transcripts
+ - Test: already-indexed sessions (by file hash) are skipped
+ - Test: new sessions since last index are added
+ - **GREEN**: Implement `src/sessions.ts`:
+ - `discoverSessions(storagePath: string): SessionDir[]` — find all `message/{session_id}/` directories under storage path. Also check project-specific dirs in `session/{hash}/`.
+ - `parseSession(sessionDir: string): ParsedSession` — read all msg_*.json files, sort by timestamp, extract role + content fields. Handle missing/corrupt files with try/catch.
+ - `sessionToText(session: ParsedSession): string` — format as:
+ ```
+ [Session: {title}] [{date}]
+ User: {message}
+ Assistant: {message}
+ ...
+ ```
+ - `async indexSessions(storagePath, db, embedder, config)`:
+ 1. Discover all session directories
+ 2. For each: compute hash of combined message content
+ 3. Skip if already indexed with same hash
+ 4. Convert to text, chunk, embed, store with source="sessions"
+ - **REFACTOR**: Handle edge cases (empty sessions, partial data)
+
+ **Must NOT do**:
+ - Don't index tool call details (just user/assistant messages)
+ - Don't implement session filtering (all sessions indexed)
+ - Don't implement incremental message indexing (whole session = one unit)
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: Parsing JSON files from unknown directory structure, handling corruption, integrating with indexer pipeline. Moderate complexity.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (parallel with Task 5)
+ - **Parallel Group**: Wave 3
+ - **Blocks**: Task 11
+ - **Blocked By**: Tasks 3, 4
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/session-files.ts` — Session transcript conversion. Shows how JSONL transcripts are converted to searchable text. Adapt for Opencode's JSON format.
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:800-1000` — How session sources are handled alongside memory sources
+
+ **API/Type References**:
+ - Opencode session JSON format (discovered during research):
+ - Session: `{ id, slug, projectID, directory, title, time: { created, updated }, summary }`
+ - Message: `{ id, sessionID, role, time: { created }, agent, model }`
+ - Session storage: `~/.local/share/opencode/storage/session/{project_hash}/ses_*.json`
+ - Message storage: `~/.local/share/opencode/storage/message/{session_id}/msg_*.json`
+
+ **WHY Each Reference Matters**:
+ - `session-files.ts`: Exact pattern for converting conversation transcripts to text format suitable for chunking and embedding.
+ - Session JSON format: Needed to parse the actual message content from Opencode's storage.
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/sessions.test.ts`
+ - [ ] `bun test src/__tests__/sessions.test.ts` → PASS (all 8 tests)
+ - [ ] Handles corrupt JSON without crashing
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Parse real Opencode session transcripts
+ Tool: Bash
+ Preconditions: Opencode storage exists at ~/.local/share/opencode/storage/
+ Steps:
+ 1. bun run -e "import { discoverSessions } from './src/sessions'; const sessions = discoverSessions(process.env.HOME + '/.local/share/opencode/storage'); console.log('found sessions:', sessions.length)"
+ 2. Assert: sessions.length > 0
+ 3. Parse first session and verify text output contains "User:" and "Assistant:" markers
+ Expected Result: Real session transcripts parseable
+ Evidence: Terminal output (first 200 chars of transcript)
+
+ Scenario: Corrupt JSON file doesn't crash parser
+ Tool: Bash
+ Preconditions: Test fixture with corrupt JSON
+ Steps:
+ 1. Create test dir with valid session JSON + one corrupt msg file (invalid JSON)
+ 2. bun run -e "import { parseSession } from './src/sessions'; const s = parseSession('/tmp/test-session'); console.log('messages:', s.messages.length)"
+ 3. Assert: no error thrown, corrupt message skipped
+ Expected Result: Graceful handling, partial results
+ Evidence: Terminal output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(sessions): Opencode session transcript parser and indexer`
+ - Files: `src/sessions.ts`, `src/__tests__/sessions.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 8. FTS5 BM25 Search Module
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/search-fts.test.ts`:
+ - Test: `searchFTS(db, query, maxResults)` returns matching chunks with BM25 rank scores
+ - Test: matches on exact keywords
+ - Test: matches on partial words (FTS5 prefix queries)
+ - Test: returns empty array for no matches
+ - Test: results are ranked by BM25 relevance (best first)
+ - Test: respects maxResults limit
+ - Test: returns chunk metadata (file_path, start_line, end_line, text, score)
+ - **GREEN**: Implement FTS5 search in `src/search.ts`:
+ - `searchFTS(db, query, maxResults): SearchResult[]`:
+ ```sql
+ SELECT c.id, c.file_path, c.start_line, c.end_line, c.text,
+ rank AS score
+ FROM chunks_fts
+ JOIN chunks c ON chunks_fts.rowid = c.rowid
+ WHERE chunks_fts MATCH ?
+ ORDER BY rank
+ LIMIT ?
+ ```
+ - Normalize BM25 scores to 0-1 range for hybrid combiner
+ - Handle FTS5 query syntax (escape special characters)
+
+ **Must NOT do**:
+ - Don't implement hybrid combination (Task 10)
+ - Don't add query preprocessing or expansion
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - Reason: Single SQL query + score normalization. Small, focused module.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 4 (with Task 9)
+ - **Blocks**: Task 10
+ - **Blocked By**: Task 6
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/hybrid.ts:50-100` — BM25 search implementation and score normalization. This is the PRIMARY reference.
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:1000-1100` — How FTS5 queries are constructed and executed
+
+ **External References**:
+ - SQLite FTS5: `https://www.sqlite.org/fts5.html#the_bm25_function`
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/search-fts.test.ts`
+ - [ ] `bun test src/__tests__/search-fts.test.ts` → PASS (all 7 tests)
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: FTS5 search finds indexed content
+ Tool: Bash (bun test)
+ Preconditions: Test DB with indexed chunks containing known text
+ Steps:
+ 1. bun test src/__tests__/search-fts.test.ts
+ 2. Assert: searching "TypeScript" finds chunk containing "TypeScript is my preferred language"
+ 3. Assert: score is a number between 0 and 1
+ Expected Result: Keyword search returns ranked results
+ Evidence: Test output
+ ```
+
+ **Commit**: YES (groups with 9, 10)
+ - Message: `feat(search): FTS5 BM25 keyword search module`
+ - Files: `src/search.ts` (partial), `src/__tests__/search-fts.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 9. Vector Cosine Similarity Search Module
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/search-vec.test.ts`:
+ - Test: `searchVector(db, queryEmbedding, maxResults)` returns chunks ranked by cosine similarity
+ - Test: more similar content scores higher
+ - Test: returns empty array when no data in vec0 table
+ - Test: respects maxResults limit
+ - Test: returns chunk metadata (file_path, start_line, end_line, text, score)
+ - Test: handles case where vec0 extension is unavailable (returns empty, doesn't crash)
+ - **GREEN**: Implement vector search in `src/search.ts`:
+ - `searchVector(db, queryEmbedding, maxResults): SearchResult[]`:
+ ```sql
+ SELECT c.id, c.file_path, c.start_line, c.end_line, c.text,
+ distance AS score
+ FROM chunks_vec
+ JOIN chunks c ON chunks_vec.rowid = c.rowid
+ WHERE embedding MATCH ?
+ ORDER BY distance
+ LIMIT ?
+ ```
+ - Convert distance to similarity score (1 - distance for cosine)
+ - Normalize to 0-1 range
+ - Handle vec0 unavailability: return empty results, log warning
+
+ **Must NOT do**:
+ - Don't implement hybrid combination (Task 10)
+ - Don't implement approximate nearest neighbor tuning
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - Reason: Single SQL query + distance-to-similarity conversion. Small module, similar to Task 8.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 4 (with Task 8)
+ - **Blocks**: Task 10
+ - **Blocked By**: Task 6
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/hybrid.ts:100-150` — Vector search implementation. Shows cosine distance query and score conversion. PRIMARY reference.
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:1100-1200` — How vector queries are constructed
+
+ **External References**:
+ - sqlite-vec query syntax: `https://alexgarcia.xyz/sqlite-vec/api-reference.html`
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/search-vec.test.ts`
+ - [ ] `bun test src/__tests__/search-vec.test.ts` → PASS (all 6 tests)
+ - [ ] Gracefully handles missing vec0 extension
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Vector search returns semantically similar results
+ Tool: Bash (bun test)
+ Preconditions: Test DB with embedded chunks
+ Steps:
+ 1. bun test src/__tests__/search-vec.test.ts
+ 2. Assert: query about "programming language preferences" finds chunk about "TypeScript"
+ 3. Assert: similarity score decreases for less relevant chunks
+ Expected Result: Semantic search returns ranked results
+ Evidence: Test output
+ ```
+
+ **Commit**: YES (groups with 8, 10)
+ - Message: `feat(search): vector cosine similarity search module`
+ - Files: `src/search.ts` (extended), `src/__tests__/search-vec.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [x] 10. Hybrid Search Combiner
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/search-hybrid.test.ts`:
+ - Test: `hybridSearch(db, query, embedder, config)` combines FTS5 and vector results
+ - Test: weighting applies correctly (0.7 * vectorScore + 0.3 * textScore)
+ - Test: results below minScore threshold are filtered out
+ - Test: duplicate chunks (found by both searches) are merged (not duplicated)
+ - Test: results are sorted by combined score (highest first)
+ - Test: maxResults is respected after merging
+ - Test: works with only FTS5 results (vec0 unavailable) — degraded mode
+ - Test: works with only vector results (FTS5 query fails) — degraded mode
+ - Test: custom weights override defaults
+ - **GREEN**: Implement `src/search.ts` (add to existing):
+ - `async hybridSearch(db, query, embedder, config): Promise<SearchResult[]>`:
+ 1. Run FTS5 search: `searchFTS(db, query, config.maxResults * 2)`
+ 2. Embed query: `embedder.embed(query)` (with cache)
+ 3. Run vector search: `searchVector(db, queryEmbedding, config.maxResults * 2)`
+ 4. Merge results by chunk ID:
+ - If in both: `combinedScore = vectorWeight * vectorScore + textWeight * textScore`
+ - If only FTS5: `combinedScore = textWeight * textScore`
+ - If only vector: `combinedScore = vectorWeight * vectorScore`
+ 5. Filter by minScore
+ 6. Sort by combinedScore descending
+ 7. Limit to maxResults
+ 8. Return with source metadata (file_path, start_line, end_line, text, score, source)
+
+ **Must NOT do**:
+ - Don't implement query expansion or rewriting
+ - Don't implement re-ranking with a separate model
+
+ **Recommended Agent Profile**:
+ - **Category**: `ultrabrain`
+ - Reason: Score merging, deduplication by ID, weighted combination, edge case handling (degraded modes). Requires careful algorithmic thinking.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (depends on 8 and 9)
+ - **Parallel Group**: Wave 4 (after Tasks 8 + 9)
+ - **Blocks**: Task 11
+ - **Blocked By**: Tasks 8, 9
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/hybrid.ts` — THE reference for hybrid search. This entire file is the pattern. Shows score normalization, weighted combination, merging, deduplication, filtering, sorting. Copy the algorithm closely.
+
+ **WHY This Reference Matters**:
+ - This is the heart of Openclaw's memory system. The hybrid search combiner determines recall quality. The weighting, merging, and filtering logic must be correct.
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/search-hybrid.test.ts`
+ - [ ] `bun test src/__tests__/search-hybrid.test.ts` → PASS (all 9 tests)
+ - [ ] Degraded mode works (FTS-only, vector-only)
+ - [ ] Duplicate chunks merged correctly
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Hybrid search combines vector and keyword results
+ Tool: Bash (bun test)
+ Preconditions: DB with indexed chunks containing diverse content
+ Steps:
+ 1. bun test src/__tests__/search-hybrid.test.ts
+ 2. Assert: hybrid results include chunks found by BOTH methods
+ 3. Assert: combined score = 0.7 * vectorScore + 0.3 * textScore for shared results
+ 4. Assert: results sorted by combined score descending
+ 5. Assert: results below minScore=0.35 are filtered
+ Expected Result: Hybrid search correctly combines and ranks
+ Evidence: Test output
+
+ Scenario: Graceful degradation when vec0 unavailable
+ Tool: Bash (bun test)
+ Preconditions: Mock vec0 as unavailable
+ Steps:
+ 1. bun test src/__tests__/search-hybrid.test.ts --filter "degraded"
+ 2. Assert: FTS-only results returned with textWeight scoring
+ 3. Assert: no error thrown
+ Expected Result: Search works with BM25 only
+ Evidence: Test output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(search): hybrid search combiner with weighted vector+BM25 scoring`
+ - Files: `src/search.ts` (complete), `src/__tests__/search-hybrid.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [ ] 11. Agent Tools — memory_search, memory_store, memory_get
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/tools.test.ts`:
+ - Test: `memorySearchTool` schema validates query string, optional maxResults, optional source filter
+ - Test: `memorySearchTool.execute(query)` calls hybridSearch and formats results
+ - Test: `memoryStoreTool` schema validates content string, optional title, optional category
+ - Test: `memoryStoreTool.execute(content, title, category)` creates markdown file in vault with frontmatter, then indexes it
+ - Test: markdown filename is slugified from title (or timestamp-based if no title)
+ - Test: markdown has YAML frontmatter (type, category, created_at, source)
+ - Test: `memoryGetTool` schema validates filePath, optional startLine, optional endLine
+ - Test: `memoryGetTool.execute(filePath, startLine, endLine)` reads file and returns specified line range
+ - Test: memoryGetTool rejects paths outside vault directory
+ - Test: atomic write (temp file + rename) for memoryStore
+ - **GREEN**: Implement `src/tools.ts`:
+ - `memorySearchTool`:
+ - Schema: `{ query: z.string(), maxResults?: z.number().default(6), source?: z.enum(["memory","sessions","all"]).default("all") }`
+ - Execute: call hybridSearch, format results as:
+ ```
+ Found N relevant memories:
+ ---
+ [1] {file_path}:{start_line}-{end_line} (score: 0.85, source: memory)
+ {text content}
+ ---
+ ```
+ - `memoryStoreTool`:
+ - Schema: `{ content: z.string(), title?: z.string(), category?: z.enum(["preferences","facts","decisions","entities","other"]).default("other") }`
+ - Execute:
+ 1. Generate filename: `{category}/{slugify(title)}-{timestamp}.md` or `{category}/{timestamp}.md`
+ 2. Create YAML frontmatter: `---\ntype: memory\ncategory: {cat}\ncreated_at: {ISO date}\nsource: agent\n---`
+ 3. Write to vault atomically (write to .tmp, rename)
+ 4. Index the new file immediately
+ 5. Return confirmation with file path
+ - `memoryGetTool`:
+ - Schema: `{ filePath: z.string(), startLine?: z.number(), endLine?: z.number() }`
+ - Execute: validate path is within vault, read file, return requested lines
+ - Security: reject any path not starting with vault directory
+ - **REFACTOR**: Extract markdown generation, ensure consistent frontmatter
+
+ **Must NOT do**:
+ - Don't implement memory_update or memory_delete (read + search + store covers needs)
+ - Don't implement bulk operations
+ - Don't add LLM-based summarization to memory_store
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: Three tools with Zod schemas, file I/O, security validation, integration with all previous modules. Significant integration task.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO
+ - **Parallel Group**: Wave 5
+ - **Blocks**: Task 12
+ - **Blocked By**: Tasks 10, 7
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/agents/tools/memory-tool.ts` — Openclaw's memory tools. Shows exact tool structure, Zod schema patterns, result formatting. PRIMARY reference for tool design.
+ - `/home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md` — Current memory skill workflows (store, recall). Shows the user's expected interaction patterns.
+ - `/home/m3tam3re/p/AI/AGENTS/skills/obsidian/SKILL.md` — Obsidian vault structure and frontmatter conventions for `~/CODEX/80-memory/`. Shows category subfolders and frontmatter template.
+
+ **API/Type References**:
+ - Opencode custom tool format: `https://opencode.ai/docs/custom-tools/` — Tool definition with Zod schemas
+ - Opencode plugin tool() helper: `https://opencode.ai/docs/plugins/` — How to define tools inside a plugin
+
+ **WHY Each Reference Matters**:
+ - `memory-tool.ts`: Direct pattern for memory_search output formatting. Shows how to present search results concisely.
+ - `SKILL.md` (memory): Shows user's existing mental model of store/recall workflows. Tools should match expectations.
+ - `SKILL.md` (obsidian): Shows frontmatter format and category subfolders (`preferences/`, `facts/`, etc.).
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/tools.test.ts`
+ - [ ] `bun test src/__tests__/tools.test.ts` → PASS (all 10 tests)
+ - [ ] memory_store creates files atomically (temp + rename)
+ - [ ] memory_get rejects paths outside vault
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: memory_store creates markdown with correct frontmatter
+ Tool: Bash
+ Preconditions: Vault directory exists, DB initialized
+ Steps:
+ 1. Call memoryStoreTool.execute({ content: "I prefer dark themes in all editors", title: "editor preferences", category: "preferences" })
+ 2. Assert: file created at ~/CODEX/80-memory/preferences/editor-preferences-*.md
+ 3. cat the file → Assert frontmatter contains "type: memory", "category: preferences"
+ 4. Assert: body contains "I prefer dark themes in all editors"
+ Expected Result: Markdown file with correct structure
+ Evidence: File content captured
+
+ Scenario: memory_search returns formatted results
+ Tool: Bash
+ Preconditions: Vault has indexed memories
+ Steps:
+ 1. Call memorySearchTool.execute({ query: "editor theme preference" })
+ 2. Assert: output contains "Found N relevant memories"
+ 3. Assert: results include file paths and scores
+ 4. Assert: scores are between 0 and 1
+ Expected Result: Formatted search results
+ Evidence: Tool output captured
+
+ Scenario: memory_get rejects path traversal
+ Tool: Bash (bun test)
+ Preconditions: Tools module implemented
+ Steps:
+ 1. bun test src/__tests__/tools.test.ts --filter "rejects paths outside"
+ 2. Assert: memoryGetTool.execute({ filePath: "/etc/passwd" }) throws error
+ 3. Assert: memoryGetTool.execute({ filePath: "../../etc/passwd" }) throws error
+ Expected Result: Security validation works
+ Evidence: Test output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(tools): agent tools — memory_search, memory_store, memory_get`
+ - Files: `src/tools.ts`, `src/__tests__/tools.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [ ] 12. Plugin Entry Point — Hooks + Lifecycle
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/plugin.test.ts`:
+ - Test: plugin exports default function
+ - Test: plugin registers 3 tools (memory_search, memory_store, memory_get)
+ - Test: session.created handler calls auto-recall (hybrid search on session context)
+ - Test: session.idle handler calls auto-capture (extract and store memories)
+ - Test: experimental.session.compacting handler injects memory context (≤ 2000 tokens)
+ - Test: plugin initializes DB on first call (lazy init)
+ - Test: plugin handles DB initialization failure gracefully (log, continue)
+ - Test: plugin handles embedding API failure gracefully (log, continue)
+ - Test: token budget is respected in injection (truncate if > 2000 tokens)
+ - **GREEN**: Implement `src/index.ts`:
+ - Default export: Opencode plugin function
+ ```typescript
+ import { tool } from "@opencode-ai/plugin"
+
+ export default function(ctx) {
+ // Lazy initialization
+ let db, embedder, config
+
+ const init = () => {
+ if (db) return
+ config = resolveConfig()
+ db = initDatabase(config.dbPath)
+ embedder = new EmbeddingProvider({ db, model: config.embedding.model })
+ }
+
+ // Register tools
+ ctx.tool("memory_search", memorySearchTool.schema, async (params) => {
+ init()
+ return memorySearchTool.execute(params, db, embedder, config)
+ })
+
+ ctx.tool("memory_store", memoryStoreTool.schema, async (params) => {
+ init()
+ return memoryStoreTool.execute(params, db, embedder, config)
+ })
+
+ ctx.tool("memory_get", memoryGetTool.schema, async (params) => {
+ init()
+ return memoryGetTool.execute(params, config)
+ })
+
+ // Event hooks
+ ctx.on("session.created", async (event) => {
+ init()
+ // Auto-recall: search for relevant memories based on session context
+ // Inject results into system prompt or initial context
+ })
+
+ ctx.on("session.idle", async (event) => {
+ init()
+ // Auto-capture: extract memories from recent conversation
+ // Store as markdown + index
+ })
+
+ ctx.on("experimental.session.compacting", async (event) => {
+ init()
+ // Inject relevant memory context into compaction
+ // Respect token budget
+ })
+ }
+ ```
+ - Auto-recall logic: On session.created, search for memories related to the project directory and recent context. Format top results within token budget. Inject via system prompt addition.
+ - Auto-capture logic: On session.idle, analyze recent messages. Use LLM (or simple heuristics) to extract key facts, decisions, preferences. Store as markdown via memoryStoreTool.
+ - Compaction injection: On session.compacting, search for relevant memories and include in compaction context.
+ - Error wrapping: ALL hooks wrapped in try/catch → log error, never crash Opencode
+ - File watcher: Start chokidar watcher on vault directory for live file changes → re-index changed files
+
+ **Must NOT do**:
+ - Don't implement complex LLM-based extraction for auto-capture (use simple heuristic or minimal prompt — Phase 2 can enhance)
+ - Don't implement custom settings UI
+ - Don't add CLI commands (Task 13 handles rebuild)
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - Reason: Plugin integration with Opencode's event system, lifecycle management, error handling, file watching. Core integration task.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO
+ - **Parallel Group**: Wave 5 (after Task 11)
+ - **Blocks**: Task 13
+ - **Blocked By**: Task 11
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/agents/tools/memory-tool.ts` — How tools are registered and how search results are formatted for agent consumption
+ - `/home/m3tam3re/p/AI/openclaw/src/agents/system-prompt.ts` — How memory instructions are injected into system prompt. Shows the "Before answering, search memory..." pattern.
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:1300-1590` — Lifecycle methods: init, sync, shutdown, file watcher setup
+
+ **API/Type References**:
+ - Opencode plugin API: `https://opencode.ai/docs/plugins/` — Plugin function signature, ctx.tool(), ctx.on(), event types
+ - Opencode custom tools: `https://opencode.ai/docs/custom-tools/` — Tool schema format with Zod
+
+ **Documentation References**:
+ - `/home/m3tam3re/.config/opencode/opencode.json:128-132` — Existing plugin registration pattern (shows how plugins are listed)
+
+ **WHY Each Reference Matters**:
+ - `system-prompt.ts`: Shows the exact memory instruction pattern that makes agents reliably use memory tools. Without this, agents may ignore the tools.
+ - Plugin docs: The exact API surface for ctx.tool() and ctx.on(). Critical for correct integration.
+ - `manager.ts:1300-1590`: Shows chokidar file watcher setup, debouncing, and cleanup.
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/plugin.test.ts`
+ - [ ] `bun test src/__tests__/plugin.test.ts` → PASS (all 9 tests)
+ - [ ] All hooks wrapped in try/catch
+ - [ ] Token budget respected
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Plugin loads in opencode without errors
+ Tool: interactive_bash (tmux)
+ Preconditions: Plugin built, registered in opencode.json
+ Steps:
+ 1. Add "opencode-memory" to plugin list in opencode.json (or use local path)
+ 2. Start opencode in tmux session
+ 3. Wait for initialization (5s)
+ 4. Check opencode logs for "opencode-memory" → no errors
+ 5. Verify memory_search tool is available (try calling it)
+ Expected Result: Plugin loads, tools available
+ Evidence: Terminal output, opencode log screenshot
+
+ Scenario: Plugin handles missing vault directory gracefully
+ Tool: Bash
+ Preconditions: Vault directory temporarily renamed
+ Steps:
+ 1. mv ~/CODEX/80-memory ~/CODEX/80-memory.bak
+ 2. Start plugin init → should log warning, not crash
+ 3. mv ~/CODEX/80-memory.bak ~/CODEX/80-memory
+ Expected Result: Graceful degradation with warning
+ Evidence: Log output
+
+ Scenario: Token budget limits memory injection
+ Tool: Bash (bun test)
+ Preconditions: DB with many indexed memories
+ Steps:
+ 1. bun test src/__tests__/plugin.test.ts --filter "token budget"
+ 2. Assert: injected context ≤ 2000 tokens
+ Expected Result: Budget respected
+ Evidence: Test output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(plugin): Opencode plugin entry point with hooks and lifecycle`
+ - Files: `src/index.ts`, `src/__tests__/plugin.test.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [ ] 13. Integration Testing + Error Handling + Rebuild Command
+
+ **What to do**:
+ - **RED**: Write `src/__tests__/integration.test.ts`:
+ - Test: full pipeline — create memory → search → find it
+ - Test: full pipeline — index vault → search → get file
+ - Test: full pipeline — index sessions → search session content
+ - Test: rebuild command — delete DB → rebuild → all content searchable again
+ - Test: OpenAI API failure → plugin continues, BM25-only results
+ - Test: corrupt SQLite → auto-recreate on next init
+ - Test: concurrent search + index operations don't deadlock
+ - Test: empty vault → no errors, empty search results
+ - Test: very large file (1MB+) → chunks correctly, no OOM
+ - **GREEN**:
+ - Add CLI entry point for rebuild: `src/cli.ts`
+ ```typescript
+ // bun run src/cli.ts --rebuild [--vault path] [--db path]
+ ```
+ - Add error recovery to `initDatabase`: if DB is corrupt, delete and recreate
+ - Add timeout to embedding API calls (30s default)
+ - Add graceful shutdown: close DB, stop file watcher, on process exit
+ - Ensure all error paths are covered with try/catch
+ - **REFACTOR**: Run full test suite, fix any integration issues
+
+ **Must NOT do**:
+ - Don't build a comprehensive CLI (just --rebuild)
+ - Don't add progress bars or fancy output
+ - Don't implement migration from mem0
+
+ **Recommended Agent Profile**:
+ - **Category**: `deep`
+ - Reason: Integration testing requires understanding the full system. Error scenarios require careful thinking about failure modes and recovery.
+ - **Skills**: [`systematic-debugging`]
+ - `systematic-debugging`: Needed for diagnosing integration test failures systematically
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO
+ - **Parallel Group**: Wave 6
+ - **Blocks**: Task 14
+ - **Blocked By**: Task 12
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:1-50` — Imports and error handling patterns
+ - `/home/m3tam3re/p/AI/openclaw/src/memory/manager.ts:1400-1590` — Shutdown and cleanup logic
+
+ **Acceptance Criteria**:
+
+ **TDD:**
+ - [ ] Test file: `src/__tests__/integration.test.ts`
+ - [ ] `bun test` → ALL tests pass (0 failures across all test files)
+ - [ ] CLI rebuild: `bun run src/cli.ts --rebuild` works
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Full pipeline — store and search a memory
+ Tool: Bash
+ Preconditions: Plugin module fully implemented
+ Steps:
+ 1. Initialize in-memory DB + embedder
+ 2. Store memory: "I prefer Nix for system configuration"
+ 3. Search: "system configuration tool"
+ 4. Assert: search result contains "Nix" with score > 0.35
+ Expected Result: End-to-end pipeline works
+ Evidence: Terminal output
+
+ Scenario: Rebuild command recreates index from markdown
+ Tool: Bash
+ Preconditions: Vault has markdown files, DB deleted
+ Steps:
+ 1. rm -f ~/.local/share/opencode-memory/index.db
+ 2. bun run src/cli.ts --rebuild --vault ~/CODEX/80-memory/
+ 3. Assert: DB file created
+ 4. Assert: search for known vault content returns results
+ Expected Result: Index rebuilt from markdown source of truth
+ Evidence: Terminal output + DB file exists
+
+ Scenario: API failure degrades gracefully
+ Tool: Bash
+ Preconditions: OPENAI_API_KEY=invalid
+ Steps:
+ 1. OPENAI_API_KEY=invalid bun test src/__tests__/integration.test.ts --filter "API failure"
+ 2. Assert: no crash, BM25-only results returned
+ 3. Assert: error logged to stderr
+ Expected Result: Graceful degradation
+ Evidence: Test output
+ ```
+
+ **Commit**: YES
+ - Message: `feat(integration): integration tests, error recovery, rebuild CLI`
+ - Files: `src/__tests__/integration.test.ts`, `src/cli.ts`
+ - Pre-commit: `bun test`
+
+---
+
+- [ ] 14. AGENTS Repo Skill Updates + Deployment Config
+
+ **What to do**:
+ - Update `skills/memory/SKILL.md` in the AGENTS repo:
+ - Replace dual-layer (mem0 + Obsidian) description with opencode-memory plugin description
+ - Document new architecture: SQLite hybrid search, markdown source of truth
+ - Update workflows: memory_search, memory_store, memory_get tools
+ - Document auto-recall (session.created) and auto-capture (session.idle) behavior
+ - Remove references to mem0 REST API
+ - Keep Obsidian vault references (still at `~/CODEX/80-memory/`)
+ - Update `skills/mem0-memory/SKILL.md`:
+ - Add deprecation notice at top: "DEPRECATED: Replaced by opencode-memory plugin. See skills/memory/SKILL.md."
+ - Keep existing content for reference
+ - Add plugin registration note to `context/profile.md`:
+ - Update memory system description to reference opencode-memory
+ - Create deployment documentation in `skills/memory/references/deployment.md`:
+ - How to install opencode-memory (npm or Nix)
+ - How to register in opencode.json
+ - How to configure vault path and embedding provider
+ - How to verify installation
+
+ **Must NOT do**:
+ - Don't modify opencode.json (user does this manually after deployment)
+ - Don't delete mem0-memory skill (just deprecate)
+ - Don't modify agent definitions (Apollo agent config stays)
+ - Don't create README in the opencode-memory repo
+
+ **Recommended Agent Profile**:
+ - **Category**: `writing`
+ - Reason: Documentation-heavy task. Updating skill files, writing deployment notes.
+ - **Skills**: none
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (final task)
+ - **Parallel Group**: Wave 6 (after Task 13)
+ - **Blocks**: None (final task)
+ - **Blocked By**: Task 13
+
+ **References**:
+
+ **Pattern References**:
+ - `/home/m3tam3re/p/AI/AGENTS/skills/memory/SKILL.md` — Current memory skill to update. Preserve structure, update content.
+ - `/home/m3tam3re/p/AI/AGENTS/skills/mem0-memory/SKILL.md` — Current mem0 skill to deprecate. Add deprecation notice.
+ - `/home/m3tam3re/p/AI/AGENTS/context/profile.md` — User profile with memory references to update.
+ - `/home/m3tam3re/p/AI/AGENTS/skills/obsidian/SKILL.md` — Obsidian skill for reference (vault structure stays same).
+
+ **Acceptance Criteria**:
+
+ **Agent-Executed QA Scenarios:**
+
+ ```
+ Scenario: Updated memory skill validates
+ Tool: Bash
+ Preconditions: AGENTS repo skills updated
+ Steps:
+ 1. ./scripts/test-skill.sh memory
+ 2. Assert: validation passes
+ 3. grep "opencode-memory" skills/memory/SKILL.md → found
+ 4. grep "DEPRECATED" skills/mem0-memory/SKILL.md → found
+ Expected Result: Skills validate, content updated
+ Evidence: Validation output
+
+ Scenario: Profile references new memory system
+ Tool: Bash
+ Preconditions: context/profile.md updated
+ Steps:
+ 1. grep "opencode-memory" context/profile.md → found
+ 2. grep "mem0" context/profile.md → NOT found (or marked deprecated)
+ Expected Result: Profile references updated
+ Evidence: grep output
+ ```
+
+ **Commit**: YES
+ - Message: `docs(memory): update skills for opencode-memory plugin, deprecate mem0`
+ - Files: `skills/memory/SKILL.md`, `skills/mem0-memory/SKILL.md`, `context/profile.md`, `skills/memory/references/deployment.md`
+ - Pre-commit: `./scripts/test-skill.sh --validate`
+
+---
+
+## Commit Strategy
+
+| After Task | Message | Key Files | Verification |
+|------------|---------|-----------|--------------|
+| 1 | `feat(scaffold): initialize opencode-memory repo` | package.json, tsconfig.json, src/*.ts stubs | `bun test` |
+| 2+3+4 | `feat(core): config, database schema, file discovery` | config.ts, db.ts, discovery.ts + tests | `bun test` |
+| 5 | `feat(embeddings): OpenAI provider with cache` | embeddings.ts + test | `bun test` |
+| 6 | `feat(indexer): file indexer pipeline` | indexer.ts + test | `bun test` |
+| 7 | `feat(sessions): session transcript parser` | sessions.ts + test | `bun test` |
+| 8+9+10 | `feat(search): hybrid search (FTS5 + vec0)` | search.ts + tests | `bun test` |
+| 11 | `feat(tools): agent memory tools` | tools.ts + test | `bun test` |
+| 12 | `feat(plugin): Opencode plugin entry point` | index.ts + test | `bun test` |
+| 13 | `feat(integration): tests + error recovery + rebuild` | integration.test.ts, cli.ts | `bun test` |
+| 14 | `docs(memory): update AGENTS repo skills` | SKILL.md files | `./scripts/test-skill.sh --validate` |
+
+---
+
+## Success Criteria
+
+### Verification Commands
+```bash
+# In opencode-memory repo:
+bun test # Expected: ALL tests pass (0 failures)
+bun run src/cli.ts --rebuild --vault ~/CODEX/80-memory/ # Expected: index rebuilt
+
+# In AGENTS repo:
+./scripts/test-skill.sh --validate # Expected: all skills valid
+
+# In opencode (after registration):
+# memory_search tool available and returns results
+# memory_store tool creates markdown files
+# memory_get tool reads file content
+```
+
+### Final Checklist
+- [ ] All "Must Have" items present (hybrid search, caching, graceful degradation, etc.)
+- [ ] All "Must NOT Have" items absent (no mem0 dependency, no multi-vault, no UI, etc.)
+- [ ] All unit tests pass (`bun test`)
+- [ ] Integration tests pass
+- [ ] Plugin loads in Opencode without errors
+- [ ] Auto-recall fires on session.created
+- [ ] Auto-capture fires on session.idle
+- [ ] Rebuild command recreates index from markdown
+- [ ] OpenAI failure doesn't crash plugin
+- [ ] AGENTS repo skills updated and validated
diff --git a/context/profile.md b/context/profile.md
index 2b92679..a1afd65 100644
--- a/context/profile.md
+++ b/context/profile.md
@@ -109,18 +109,19 @@
## Memory System
-AI agents have access to a dual-layer memory system for persistent context across sessions.
+AI agents have access to a persistent memory system for context across sessions via the opencode-memory plugin.
### Configuration
| Setting | Value |
|---------|-------|
-| **Mem0 Endpoint** | `http://localhost:8000` |
-| **Mem0 User ID** | `m3tam3re` |
+| **Plugin** | `opencode-memory` |
| **Obsidian Vault** | `~/CODEX` |
| **Memory Folder** | `80-memory/` |
-| **Auto-Capture** | Enabled (max 3 per session) |
-| **Auto-Recall** | Enabled (top 5, score > 0.7) |
+| **Database** | `~/.local/share/opencode-memory/index.db` |
+| **Auto-Capture** | Enabled (session.idle event) |
+| **Auto-Recall** | Enabled (session.created event) |
+| **Token Budget** | 2000 tokens |
### Memory Categories
@@ -132,16 +133,19 @@ AI agents have access to a dual-layer memory system for persistent context acros
| `entity` | People, orgs, systems | Key contacts, important APIs |
| `other` | Everything else | General learnings |
-### MCP Server
+### Available Tools
-| Setting | Value |
-|---------|-------|
-| **Server** | `cyanheads/obsidian-mcp-server` |
-| **Config** | See `skills/memory/references/mcp-config.md` |
+| Tool | Purpose |
+|------|---------|
+| `memory_search` | Hybrid search (vector + BM25) over vault + sessions |
+| `memory_store` | Store new memory as markdown file |
+| `memory_get` | Read specific file/lines from vault |
### Usage Notes
-- Memories are stored in BOTH Mem0 and Obsidian for redundancy
+- Memories are stored as markdown files in Obsidian (source of truth)
+- SQLite provides fast hybrid search (vector similarity + keyword BM25)
- Use explicit "remember this" to store important information
-- Auto-capture happens at session end with user confirmation
-- Relevant memories are injected at session start based on context
+- Auto-recall injects relevant memories at session start
+- Auto-capture extracts preferences/decisions at session idle
+- See `skills/memory/SKILL.md` for full documentation
diff --git a/skills/mem0-memory/SKILL.md b/skills/mem0-memory/SKILL.md
index 3f18099..fb75f89 100644
--- a/skills/mem0-memory/SKILL.md
+++ b/skills/mem0-memory/SKILL.md
@@ -1,10 +1,16 @@
---
name: mem0-memory
-description: "Store and retrieve memories using Mem0 REST API. Use when: (1) storing information for future recall, (2) searching past conversations or facts, (3) managing user/agent memory contexts, (4) building conversational AI with persistent memory. Triggers on keywords like 'remember', 'recall', 'memory', 'store for later', 'what did I say about'."
+description: "DEPRECATED: Replaced by opencode-memory plugin. See skills/memory/SKILL.md for current memory system."
compatibility: opencode
---
-# Mem0 Memory
+> ⚠️ **DEPRECATED**
+>
+> This skill is deprecated. The memory system has been replaced by the opencode-memory plugin.
+>
+> **See:** `skills/memory/SKILL.md` for the current memory system.
+
+# Mem0 Memory (Legacy)
Store and retrieve memories via Mem0 REST API at `http://localhost:8000`.
diff --git a/skills/memory/SKILL.md b/skills/memory/SKILL.md
index 0a2fc4c..c920dd7 100644
--- a/skills/memory/SKILL.md
+++ b/skills/memory/SKILL.md
@@ -1,201 +1,75 @@
---
name: memory
-description: "Dual-layer memory system (Mem0 + Obsidian CODEX). Use when: (1) storing information for future recall ('remember this'), (2) auto-capturing session insights, (3) recalling past decisions/preferences/facts, (4) injecting relevant context before tasks. Triggers: 'remember', 'recall', 'what do I know about', 'memory', session end."
+description: "Persistent memory system for Opencode agents. SQLite-based hybrid search over Obsidian vault. Use when: (1) storing user preferences/decisions, (2) recalling past context, (3) searching knowledge base. Triggers: remember, recall, memory, store, preference."
compatibility: opencode
---
-# Memory
-
-Dual-layer memory system for persistent AI agent context. Memories are stored in BOTH Mem0 (semantic search) AND Obsidian CODEX vault (human-readable, versioned).
-
## Overview
-**Architecture:**
-- **Mem0 Layer** (`localhost:8000`): Fast semantic search, operational memory
-- **Obsidian Layer** (`~/CODEX/80-memory/`): Human-readable notes, version controlled, wiki-linked
+opencode-memory is a SQLite-based hybrid memory system for Opencode agents. It indexes markdown files from your Obsidian vault (`~/CODEX/80-memory/`) and session transcripts, providing fast hybrid search (vector + keyword BM25).
-**Cross-Reference:**
-- `mem0_id` in Obsidian frontmatter links to Mem0
-- `obsidian_ref` in Mem0 metadata links to vault file
+## Architecture
-## Prerequisites
+- **Source of truth**: Markdown files at `~/CODEX/80-memory/`
+- **Derived index**: SQLite at `~/.local/share/opencode-memory/index.db`
+- **Hybrid search**: FTS5 (BM25) + vec0 (vector similarity)
+- **Embeddings**: OpenAI text-embedding-3-small (1536 dimensions)
-1. **Mem0 running** at `http://localhost:8000` - Verify with `curl http://localhost:8000/health`
-2. **Obsidian MCP configured** - See [references/mcp-config.md](references/mcp-config.md)
-3. **Vault structure** - `80-memory/` folder with category subfolders
+## Available Tools
-## Memory Categories
-
-| Category | Definition | Examples |
-|----------|------------|----------|
-| `preference` | Personal preferences (UI, workflow, communication style) | Dark mode, async communication, detailed responses |
-| `fact` | Objective information about user/work (role, tech stack, constraints) | Job title, preferred languages, system architecture |
-| `decision` | Architectural/tool choices made (with rationale) | Using React over Vue, PostgreSQL over MySQL |
-| `entity` | People, organizations, systems, concepts | Key contacts, important APIs, domain concepts |
-| `other` | Everything else | General learnings, context notes |
-
-## Workflow 1: Store Memory (Explicit)
-
-When user says "remember this" or "store this":
-
-### Steps
-
-1. **Classify category** - Determine which of the 5 categories applies
-2. **Store in Mem0** - POST to `/memories`:
- ```bash
- curl -X POST http://localhost:8000/memories \
- -H "Content-Type: application/json" \
- -d '{
- "messages": [{"role": "user", "content": "[memory content]"}],
- "user_id": "m3tam3re",
- "metadata": {
- "category": "preference",
- "source": "explicit"
- }
- }'
- ```
-3. **Create Obsidian note** - Use memory template in `80-memory//`:
- - Set `mem0_id` from Mem0 response
- - Set `source: explicit`
-4. **Update Mem0 with Obsidian ref** - Add `obsidian_ref` to metadata
-
-### Example
+### memory_search
+Hybrid search over all indexed content (vault + sessions).
```
-User: "Remember that I prefer detailed explanations with code examples"
-
-Agent:
-1. Category: preference
-2. Mem0: Store with category=preference, source=explicit
-3. Obsidian: Create 80-memory/preferences/prefers-detailed-explanations.md
-4. Cross-reference IDs
+memory_search(query, maxResults?, source?)
```
-## Workflow 2: Recall Memory
+- `query`: Search query (natural language)
+- `maxResults`: Max results (default 6)
+- `source`: Filter by "memory", "sessions", or "all"
-When user asks "what do I know about X":
-
-### Steps
-
-1. **Search Mem0** - POST to `/search`:
- ```bash
- curl -X POST http://localhost:8000/search \
- -H "Content-Type: application/json" \
- -d '{
- "query": "[search query]",
- "user_id": "m3tam3re"
- }'
- ```
-2. **Return results** - Include Obsidian note paths from `obsidian_ref` metadata
-3. **Optionally read full note** - Use Obsidian REST API for complete context
-
-### Example
+### memory_store
+Store new memory as markdown file in vault.
```
-User: "What do you know about my UI preferences?"
-
-Agent:
-1. Search Mem0 for "UI preferences"
-2. Return: "You prefer dark mode (80-memory/preferences/prefers-dark-mode.md)"
+memory_store(content, title?, category?)
```
-## Workflow 3: Auto-Capture (Session End)
+- `content`: Memory content to store
+- `title`: Optional title (slugified for filename)
+- `category`: "preferences", "facts", "decisions", "entities", "other"
-Automatically extract and store valuable memories at session end.
+### memory_get
+Read specific file/lines from vault.
-### Process
-
-1. **Scan conversation** for memory-worthy content:
- - Preferences stated
- - Decisions made
- - Important facts revealed
- - Entities mentioned
-2. **Select top 3** highest-value memories
-3. **For each**: Store in Mem0 AND create Obsidian note (source: "auto-capture")
-4. **Present to user**: "I captured these memories: [list]. Confirm or reject?"
-
-### Memory-Worthy Signals
-
-- "I prefer..." / "I like..." / "I hate..." → preference
-- "We decided to use..." / "Chose X because..." → decision
-- "My role is..." / "We use..." / "The system is..." → fact
-- Names, companies, tools mentioned repeatedly → entity
-
-## Workflow 4: Auto-Recall (Session Start)
-
-Inject relevant memories before starting work.
-
-### Process
-
-1. **On session start**, search Mem0 with user's first message/topic
-2. **If relevant memories found** (score > 0.7), inject as context:
- ```markdown
-
- - [preference] User prefers dark mode in all apps
- - [fact] User's tech stack: TypeScript, React, Node.js
-
- ```
-3. **Limit to top 5** most relevant memories
-
-## Error Handling
-
-### Mem0 Unavailable
-
-```bash
-# Check health first
-curl http://localhost:8000/health
-# If fails: Skip all memory operations, warn user
+```
+memory_get(filePath, startLine?, endLine?)
```
-Response: "Mem0 is not running. Memory features unavailable. Start with: [instructions]"
+## Auto-Behaviors
-### Obsidian Unavailable
+- **Auto-recall**: On session.created, relevant memories are searched and injected
+- **Auto-capture**: On session.idle, preferences/decisions are extracted and stored
+- **Token budget**: Max 2000 tokens injected to respect context limits
-- Store in Mem0 only
-- Log that Obsidian sync failed
-- Continue with degraded functionality
+## Workflows
-### Both Unavailable
+### Recall information
+Before answering about past work, preferences, or decisions:
+1. Call `memory_search` with relevant query
+2. Use `memory_get` to retrieve full context if needed
-- Skip memory entirely
-- Continue without memory features
-- Warn user: "Memory system unavailable"
+### Store new information
+When user expresses preference or decision:
+1. Call `memory_store` with content and category
-## Integration
+## Vault Structure
-### How Other Skills Use Memory
-
-```bash
-# Load memory skill to access workflows
-# Use mem0-memory skill for direct Mem0 API calls
-# Use obsidian skill for direct vault operations
```
-
-### Apollo Agent
-
-Apollo is the primary memory specialist. When complex memory operations needed, delegate to Apollo with memory skill loaded.
-
-### Skill Handoff
-
-| From Skill | Handoff Pattern |
-|------------|----------------|
-| Any skill | Load `memory` skill, call store/recall workflows |
-| mem0-memory | Direct Mem0 API, optionally sync to Obsidian |
-| obsidian | Direct vault operations, use memory template |
-
-## Quick Reference
-
-| Operation | Mem0 API | Obsidian Path |
-|-----------|----------|---------------|
-| Store | POST /memories | 80-memory//*.md |
-| Search | POST /search | Search 80-memory/ |
-| Get | GET /memories/{id} | Read note by path |
-| Update | PUT /memories/{id} | Update note |
-| Health | GET /health | Check REST API |
-
-## See Also
-
-- [references/mcp-config.md](references/mcp-config.md) - Obsidian MCP server configuration
-- `~/p/AI/AGENTS/skills/mem0-memory/SKILL.md` - Mem0 REST API details
-- `~/p/AI/AGENTS/skills/obsidian/SKILL.md` - Obsidian vault operations
-- `~/CODEX/AGENTS.md` - Vault conventions and memory folder docs
+~/CODEX/80-memory/
+├── preferences/ # User preferences
+├── facts/ # Factual knowledge
+├── decisions/ # Design decisions
+├── entities/ # People, projects, concepts
+└── other/ # Uncategorized memories
+```
diff --git a/skills/memory/references/deployment.md b/skills/memory/references/deployment.md
new file mode 100644
index 0000000..aaac4ae
--- /dev/null
+++ b/skills/memory/references/deployment.md
@@ -0,0 +1,54 @@
+# opencode-memory Deployment Guide
+
+## Installation
+
+### Option 1: Nix (Recommended)
+
+Add to your Nix flake:
+
+```nix
+inputs.opencode-memory = {
+ url = "git+https://code.m3ta.dev/m3tam3re/opencode-memory";
+ flake = false;
+};
+```
+
+### Option 2: npm
+
+```bash
+npm install -g @m3tam3re/opencode-memory
+```
+
+## Configuration
+
+Add to `~/.config/opencode/opencode.json`:
+
+```json
+{
+ "plugins": [
+ "opencode-memory"
+ ]
+}
+```
+
+## Environment Variables
+
+- `OPENAI_API_KEY`: Required for embeddings
+
+## Vault Location
+
+Default: `~/CODEX/80-memory/`
+
+To use a different location, set the vault path in the plugin's configuration.
+
+## Rebuild Index
+
+```bash
+bun run src/cli.ts --rebuild  # run from a checkout of the opencode-memory repository
+```
+
+## Verification
+
+1. Start Opencode
+2. Call `memory_search` with any query
+3. Verify no errors in logs