chore: update .gitignore, remove tracked .pi-lens files, and sync pending changes #10

Closed
m3tam3re wants to merge 22 commits from chore/update-gitignore-and-changes into master
8 changed files with 386 additions and 17 deletions
Showing only changes of commit 0867492170 - Show all commits

View File

@@ -1,3 +1,3 @@
{
"timestamp": "2026-04-11T04:17:20.531Z"
"timestamp": "2026-04-13T19:16:03.510Z"
}

View File

@@ -1,3 +1,3 @@
{
"timestamp": "2026-04-11T04:17:21.374Z"
"timestamp": "2026-04-13T19:16:06.847Z"
}

View File

@@ -1,3 +1 @@
{
"content": "📌 pi-lens active — as you work on this project, fix any errors you encounter (including pre-existing). Prefer: lsp_navigation for definitions/references, ast_grep_search for code patterns, grep for text/TODO search."
}
null

View File

@@ -1,3 +1,3 @@
{
"timestamp": "2026-04-11T04:21:36.939Z"
"timestamp": "2026-04-13T18:05:03.813Z"
}

View File

@@ -1,3 +1,3 @@
{
"timestamp": "2026-04-11T04:21:36.940Z"
"timestamp": "2026-04-13T18:04:03.698Z"
}

View File

@@ -2,5 +2,5 @@
"files": {},
"turnCycles": 0,
"maxCycles": 3,
"lastUpdated": "2026-04-11T04:17:22.397Z"
"lastUpdated": "2026-04-13T19:16:06.848Z"
}

57
PLAN.md Normal file
View File

@@ -0,0 +1,57 @@
# PLAN
## Context
- Implement **Option A**: run `pi` through a **rootless Podman** container while keeping a native terminal UX.
- Preserve `flake.nix` + `nix develop` workflows by using the **host Nix daemon** from inside the container.
- Keep logic centralized in `nixpkgs` and host-specific values in `nixos-config`.
## Approach
- Extend the existing Home Manager module at `modules/home-manager/coding/agents/pi.nix` with a `coding.agents.pi.container.*` option set.
- Implement **Option A defaults** from your decisions:
- wrapper command name is `pi` (native command replacement),
- project roots are mounted read-write,
- `autoStart = true` by default,
- `autoNixDevelop = false` by default,
- `image` default set to `docker.io/nixos/nix:latest` as a conservative base and overridden in host config for a Pi-ready image.
- Generate a deterministic wrapper script (installed via Home Manager) that:
- verifies cwd is within allowed project roots,
- ensures rootless container exists/runs,
- maps cwd and runs `podman exec -it <container> pi "$@"`,
- optionally runs via `nix develop -c pi ...` when `autoNixDevelop=true` and `flake.nix` is present.
- Configure safe Podman mounts:
- allowed project roots only,
- host Nix daemon socket (Option A),
- minimal Nix config/certs needed for CLI operation.
- Wire host-specific config in `nixos-config/home/features/coding/pi.nix` and remove direct host `pi` binary installation from the coding package list to avoid command-path ambiguity.
## Files to modify
- `modules/home-manager/coding/agents/pi.nix` (new container options + wrapper + container lifecycle logic)
- `/home/m3tam3re/p/NIX/nixos-config/home/features/coding/pi.nix` (host-specific container settings)
## Reuse
- Existing Pi HM module and option namespace:
- `modules/home-manager/coding/agents/pi.nix`
- Existing coding feature wiring in nixos-config:
- `/home/m3tam3re/p/NIX/nixos-config/home/features/coding/default.nix`
- `/home/m3tam3re/p/NIX/nixos-config/home/features/coding/pi.nix`
## Steps
- [ ] Add `coding.agents.pi.container` options (enable/name/image/projectRoots/autoStart/autoNixDevelop/extraRunArgs/extraEnv) with defaults matching your preferences (`autoStart=true`, `autoNixDevelop=false`, default image as above).
- [ ] Implement wrapper script generation in the HM module with cwd allowlist checks and container create/start/exec behavior.
- [ ] Make wrapper binary name `pi` (native UX) when container mode is enabled.
- [ ] Add deterministic container run/create args with safe mounts and host Nix daemon socket.
- [ ] Add optional in-container `nix develop -c pi` path when flake project is detected.
- [ ] Wire host-specific values in nixos-config `home/features/coding/pi.nix`.
- [ ] Remove the direct host `pi` package install from the nixos-config coding packages so the wrapper is the effective `pi` command.
- [ ] Validate eval/build and document command outputs for flake and non-flake wrapper behavior.
## Verification
- Static checks for both repos (module eval/build where appropriate).
- Home Manager evaluation/switch check in nixos-config.
- Manual wrapper checks:
- Inside a flake project: `pi` resolves via `nix develop -c pi ...` when enabled.
- Outside flake project: `pi` runs directly via container exec.
- Capture exact commands + outputs for report.
## Open questions
- None currently blocking; proceed with a conservative default image and host-override guidance.

View File

@@ -7,10 +7,117 @@
with lib; let
cfg = config.coding.agents.pi;
mcpCfg = config.programs.mcp or null;
hasPiPackage = pkgs ? pi;
defaultPiImageArchive =
if hasPiPackage
then
pkgs.dockerTools.buildLayeredImage {
name = "pi-agent";
tag = "latest";
contents = with pkgs; [
bashInteractive
bun
cacert
coreutils
findutils
git
gnugrep
gnused
nix
nodejs
pi
];
config = {
Env = [
"PATH=/bin:/usr/bin"
"NIX_REMOTE=daemon"
"SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
];
WorkingDir = "/tmp";
Cmd = ["${pkgs.coreutils}/bin/sleep" "infinity"];
};
}
else null;
in {
options.coding.agents.pi = {
enable = mkEnableOption "Pi agent management via canonical agent.toml definitions";
container = mkOption {
description = "Run Pi through a rootless Podman container while keeping a native host UX.";
default = {};
type = types.submodule {
options = {
enable = mkEnableOption "Containerized Pi wrapper";
name = mkOption {
type = types.str;
default = "pi-agent";
description = "Container name used by the Pi wrapper.";
};
image = mkOption {
type = types.str;
default = if hasPiPackage then "pi-agent:latest" else "docker.io/nixos/nix:latest";
description = ''
Podman image to run for Pi.
Defaults to a local declarative Pi-ready image when `pkgs.pi` exists,
otherwise falls back to docker.io/nixos/nix:latest.
'';
};
imageArchive = mkOption {
type = types.nullOr types.path;
default = defaultPiImageArchive;
description = ''
Optional OCI/Docker archive path to load into Podman when `image`
is missing locally. By default, a Pi-ready local image archive is
generated when `pkgs.pi` is available.
'';
};
projectRoots = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Allowlisted absolute host roots that may be mounted into the container.
Wrapper exits with a clear error when cwd is outside these roots.
'';
example = ["/home/m3tam3re/p"];
};
autoStart = mkOption {
type = types.bool;
default = true;
description = "Automatically start container when wrapper is invoked and it is not running.";
};
autoNixDevelop = mkOption {
type = types.bool;
default = false;
description = ''
If true and cwd contains flake.nix, run Pi as:
nix develop -c pi ...
inside the container.
'';
};
extraRunArgs = mkOption {
type = types.listOf types.str;
default = [];
description = "Additional Podman create args appended after safe defaults.";
};
extraEnv = mkOption {
type = types.attrsOf types.str;
default = {};
description = "Extra environment variables passed to the container.";
};
};
};
};
mcpServers = mkOption {
type = types.attrsOf types.anything;
default = if mcpCfg != null then mcpCfg.servers else {};
@@ -170,6 +277,176 @@ in {
piSettings = filterNulls cfg.settings;
projectRoots = map toString cfg.container.projectRoots;
projectRootsShell = concatStringsSep " " (map escapeShellArg projectRoots);
extraRunArgsShell = concatStringsSep " " (map escapeShellArg cfg.container.extraRunArgs);
extraEnvPairs = map (k: "${k}=${cfg.container.extraEnv.${k}}") (builtins.attrNames cfg.container.extraEnv);
extraEnvShell = concatStringsSep " " (map escapeShellArg extraEnvPairs);
hostPiDir = "${config.home.homeDirectory}/.pi";
hostPiDirShell = escapeShellArg hostPiDir;
imageArchiveShell =
if cfg.container.imageArchive != null
then escapeShellArg (toString cfg.container.imageArchive)
else "";
piWrapper = pkgs.writeShellScriptBin "pi" ''
set -euo pipefail
PODMAN="${pkgs.podman}/bin/podman"
REALPATH="${pkgs.coreutils}/bin/realpath"
CONTAINER_NAME=${escapeShellArg cfg.container.name}
IMAGE=${escapeShellArg cfg.container.image}
IMAGE_ARCHIVE=${imageArchiveShell}
AUTO_START=${if cfg.container.autoStart then "1" else "0"}
AUTO_NIX_DEVELOP=${if cfg.container.autoNixDevelop then "1" else "0"}
HOST_PI_DIR=${hostPiDirShell}
PROJECT_ROOTS=(${projectRootsShell})
EXTRA_RUN_ARGS=(${extraRunArgsShell})
EXTRA_ENV_VARS=(${extraEnvShell})
err() {
printf "pi-wrapper: %s\n" "$1" >&2
exit 1
}
if [ "''${#PROJECT_ROOTS[@]}" -eq 0 ]; then
err "No allowed projectRoots configured. Set coding.agents.pi.container.projectRoots."
fi
if ! command -v "$PODMAN" >/dev/null 2>&1; then
err "podman binary not found at $PODMAN"
fi
CWD="$($REALPATH -m "$PWD")"
cwd_allowed=0
NORMALIZED_ROOTS=()
for root in "''${PROJECT_ROOTS[@]}"; do
norm_root="$($REALPATH -m "$root")"
NORMALIZED_ROOTS+=("$norm_root")
case "$CWD/" in
"$norm_root/"*)
cwd_allowed=1
;;
esac
done
if [ "$cwd_allowed" -ne 1 ]; then
{
printf "pi-wrapper: cwd '%s' is outside allowed projectRoots.\n" "$CWD"
printf "Allowed roots:\n"
for root in "''${NORMALIZED_ROOTS[@]}"; do
printf " - %s\n" "$root"
done
} >&2
exit 1
fi
tty_args=()
if [ -t 0 ] && [ -t 1 ]; then
tty_args=(-it)
fi
ensure_image_available() {
if [ -n "$IMAGE_ARCHIVE" ] && [ -f "$IMAGE_ARCHIVE" ]; then
"$PODMAN" load -i "$IMAGE_ARCHIVE" >/dev/null
fi
if ! "$PODMAN" image exists "$IMAGE"; then
err "Container image '$IMAGE' is not available and no valid imageArchive was provided."
fi
}
create_container() {
mount_args=()
for root in "''${NORMALIZED_ROOTS[@]}"; do
mount_args+=("-v" "$root:$root:rw")
done
if [ ! -S /nix/var/nix/daemon-socket/socket ]; then
err "Host Nix daemon socket not found at /nix/var/nix/daemon-socket/socket"
fi
mount_args+=("-v" "/nix/var/nix/daemon-socket/socket:/nix/var/nix/daemon-socket/socket:rw")
mkdir -p "$HOST_PI_DIR"
mount_args+=("-v" "$HOST_PI_DIR:/tmp/.pi:rw")
if [ -d /nix/store ]; then
mount_args+=("-v" "/nix/store:/nix/store:ro")
fi
if [ -e /etc/nix/nix.conf ]; then
mount_args+=("-v" "/etc/nix/nix.conf:/etc/nix/nix.conf:ro")
fi
if [ -d /etc/ssl/certs ]; then
mount_args+=("-v" "/etc/ssl/certs:/etc/ssl/certs:ro")
fi
if [ -d /etc/pki ]; then
mount_args+=("-v" "/etc/pki:/etc/pki:ro")
fi
env_args=()
for kv in "''${EXTRA_ENV_VARS[@]}"; do
env_args+=("--env" "$kv")
done
"$PODMAN" create \
--name "$CONTAINER_NAME" \
--hostname "$CONTAINER_NAME" \
--userns keep-id \
--user "$(${pkgs.coreutils}/bin/id -u):$(${pkgs.coreutils}/bin/id -g)" \
--security-opt no-new-privileges \
--workdir /tmp \
--tmpfs /tmp:rw,nodev,nosuid \
--env HOME=/tmp \
--env NIX_REMOTE=daemon \
--env NPM_CONFIG_PREFIX=/tmp/.npm-global \
--env npm_config_prefix=/tmp/.npm-global \
--env NPM_CONFIG_CACHE=/tmp/.npm \
--env npm_config_cache=/tmp/.npm \
--env PATH=/tmp/.npm-global/bin:/bin:/usr/bin \
"''${mount_args[@]}" \
"''${env_args[@]}" \
"''${EXTRA_RUN_ARGS[@]}" \
"$IMAGE" \
sleep infinity >/dev/null
}
ensure_container_running() {
if ! "$PODMAN" container exists "$CONTAINER_NAME"; then
ensure_image_available
create_container
fi
running="$($PODMAN inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null || true)"
if [ "$running" != "true" ]; then
if [ "$AUTO_START" = "1" ]; then
"$PODMAN" start "$CONTAINER_NAME" >/dev/null
else
err "Container '$CONTAINER_NAME' is not running and autoStart=false. Start it manually with: podman start $CONTAINER_NAME"
fi
fi
}
ensure_container_running
if [ "$AUTO_NIX_DEVELOP" = "1" ] && [ -f "$CWD/flake.nix" ]; then
exec "$PODMAN" exec "''${tty_args[@]}" --workdir "$CWD" "$CONTAINER_NAME" nix develop -c pi "$@"
fi
if "$PODMAN" exec --workdir "$CWD" "$CONTAINER_NAME" sh -lc 'command -v pi >/dev/null 2>&1'; then
exec "$PODMAN" exec "''${tty_args[@]}" --workdir "$CWD" "$CONTAINER_NAME" pi "$@"
fi
err "Container '$CONTAINER_NAME' does not have 'pi' in PATH (image: $IMAGE). Use a Pi-ready image or run from a flake project with autoNixDevelop=true."
'';
# Rendered agents (only computed when agentsInput is set)
rendered =
if cfg.agentsInput != null
@@ -192,12 +469,41 @@ in {
builtins.listToAttrs (
map (name: {
name = ".pi/agent/agents/${name}.md";
value = {source = "${rendered}/agents/${name}.md";};
value = {text = builtins.readFile "${rendered}/agents/${name}.md";};
})
agentNames
)
else {};
skillsSource =
if cfg.agentsInput != null
then
cfg.agentsInput.lib.mkOpencodeSkills {
inherit pkgs;
customSkills = "${cfg.agentsInput}/skills";
}
else null;
in {
assertions =
[
{
assertion = cfg.container.enable || hasPiPackage;
message = "coding.agents.pi.enable requires pkgs.pi when container mode is disabled.";
}
]
++ optional cfg.container.enable {
assertion = cfg.container.projectRoots != [];
message = "coding.agents.pi.container.projectRoots must contain at least one absolute path when container mode is enabled.";
}
++ optional cfg.container.enable {
assertion = all (path: hasPrefix "/" (toString path)) cfg.container.projectRoots;
message = "coding.agents.pi.container.projectRoots entries must be absolute paths.";
};
home.packages =
(optional cfg.container.enable piWrapper)
++ (optional (!cfg.container.enable && hasPiPackage) pkgs.pi);
home.file = mkMerge [
# ── MCP servers from programs.mcp → ~/.pi/agent/mcp.json ───────
(mkIf (cfg.mcpServers != {}) {
@@ -211,24 +517,32 @@ in {
# ── AGENTS.md — agent descriptions and specialist listing ──────
(mkIf (cfg.agentsInput != null) {
".pi/agent/AGENTS.md".source = "${rendered}/AGENTS.md";
".pi/agent/AGENTS.md".text = builtins.readFile "${rendered}/AGENTS.md";
})
# ── SYSTEM.md — primary agent's system prompt ──────────────────
(mkIf (cfg.agentsInput != null) {
".pi/agent/SYSTEM.md".source = "${rendered}/SYSTEM.md";
".pi/agent/SYSTEM.md".text = builtins.readFile "${rendered}/SYSTEM.md";
})
# ── Agents — pi-subagents .md files ────────────────────────────
agentFiles
# ── Skills symlinked from AGENTS repo ──────────────────────────
(mkIf (cfg.agentsInput != null) {
".pi/agent/skills".source = cfg.agentsInput.lib.mkOpencodeSkills {
inherit pkgs;
customSkills = "${cfg.agentsInput}/skills";
};
# ── Skills symlinked from AGENTS repo (non-container mode) ─────
(mkIf (cfg.agentsInput != null && !cfg.container.enable) {
".pi/agent/skills".source = skillsSource;
})
];
home.activation.piMaterializeSkills = mkIf (cfg.container.enable && cfg.agentsInput != null) (
lib.hm.dag.entryAfter ["writeBoundary"] ''
skillsSrc=${escapeShellArg "${skillsSource}"}
skillsDst=${escapeShellArg "${config.home.homeDirectory}/.pi/agent/skills"}
${pkgs.coreutils}/bin/rm -rf "$skillsDst"
${pkgs.coreutils}/bin/mkdir -p "$skillsDst"
${pkgs.coreutils}/bin/cp -aL "$skillsSrc"/. "$skillsDst"/
''
);
});
}