diff --git a/.pi-lens/cache/jscpd.json b/.pi-lens/cache/jscpd.json
new file mode 100644
index 0000000..5bc59bb
--- /dev/null
+++ b/.pi-lens/cache/jscpd.json
@@ -0,0 +1,2568 @@
+{
+ "success": true,
+ "clones": [
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_38/patches/gradle-flutter-tools-wrapper.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/gradle-flutter-tools-wrapper.patch",
+ "startB": 1,
+ "lines": 219,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_38/patches/fix-macos-build-macos-assemble-sh.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/fix-macos-build-macos-assemble-sh.patch",
+ "startB": 1,
+ "lines": 59,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_38/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startB": 1,
+ "lines": 68,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_38/patches/disable-auto-update.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/disable-auto-update.patch",
+ "startB": 1,
+ "lines": 29,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_35/patches/gradle-flutter-tools-wrapper.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/gradle-flutter-tools-wrapper.patch",
+ "startB": 1,
+ "lines": 219,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_35/patches/fix-macos-build-macos-assemble-sh.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/fix-macos-build-macos-assemble-sh.patch",
+ "startB": 1,
+ "lines": 59,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_35/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startB": 1,
+ "lines": 68,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_35/patches/disable-auto-update.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/disable-auto-update.patch",
+ "startB": 1,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_32/patches/gradle-flutter-tools-wrapper.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/gradle-flutter-tools-wrapper.patch",
+ "startB": 1,
+ "lines": 59,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_32/patches/gradle-flutter-tools-wrapper.patch",
+ "startA": 155,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/gradle-flutter-tools-wrapper.patch",
+ "startB": 155,
+ "lines": 32,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_32/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startB": 1,
+ "lines": 38,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_32/patches/disable-auto-update.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/disable-auto-update.patch",
+ "startB": 1,
+ "lines": 30,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_29/patches/gradle-flutter-tools-wrapper.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/gradle-flutter-tools-wrapper.patch",
+ "startB": 1,
+ "lines": 219,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_29/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/fix-ios-build-xcode-backend-sh.patch",
+ "startB": 1,
+ "lines": 68,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_29/patches/disable-auto-update.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/versions/3_41/patches/disable-auto-update.patch",
+ "startB": 1,
+ "lines": 30,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ca/calamares-nixos-extensions/src/branding/nixos/nix-snowflake.svg",
+ "startA": 149,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ca/calamares-nixos-extensions/src/branding/nixos/nix-snowflake.svg",
+ "startB": 139,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ca/calamares-nixos-extensions/src/branding/nixos/nix-snowflake.svg",
+ "startA": 169,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ca/calamares-nixos-extensions/src/branding/nixos/nix-snowflake.svg",
+ "startB": 159,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/pkgs-lib/formats/libconfig/src/src/main.rs",
+ "startA": 148,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/pkgs-lib/formats/libconfig/src/src/main.rs",
+ "startB": 134,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/pkgs-lib/formats/hocon/src/src/main.rs",
+ "startA": 133,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/pkgs-lib/formats/libconfig/src/src/main.rs",
+ "startB": 177,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/pkgs-lib/formats/hocon/src/src/main.rs",
+ "startA": 230,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/pkgs-lib/formats/libconfig/src/src/main.rs",
+ "startB": 252,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/dyld/patches/0005-Add-OpenSSL-based-CoreCrypto-digest-functions.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/network_cmds/patches/0007-Add-OpenSSL-based-CoreCrypto-digest-functions.patch",
+ "startB": 1,
+ "lines": 74,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/dyld/patches/0005-Add-OpenSSL-based-CoreCrypto-digest-functions.patch",
+ "startA": 88,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/network_cmds/patches/0007-Add-OpenSSL-based-CoreCrypto-digest-functions.patch",
+ "startB": 86,
+ "lines": 224,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/sys-no-explicit-intrinsics-dep.patch",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/sys-no-explicit-intrinsics-dep.patch",
+ "startB": 4,
+ "lines": 38,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/stand-label.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/stand-label.patch",
+ "startB": 1,
+ "lines": 37,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/stand-label.patch",
+ "startA": 45,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/stand-label.patch",
+ "startB": 45,
+ "lines": 345,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/rc-user.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/rc-user.patch",
+ "startB": 1,
+ "lines": 17,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/mount-use-path.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/mount-use-path.patch",
+ "startB": 1,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/mk.patch",
+ "startA": 30,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/bsd-lib-mk-force-static.patch",
+ "startB": 2,
+ "lines": 48,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/localedef.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/localedef.patch",
+ "startB": 1,
+ "lines": 31,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/localedef.patch",
+ "startA": 48,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/15.0/localedef.patch",
+ "startB": 47,
+ "lines": 111,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/13.1/compat-install-dirs.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/compat-install-dirs.patch",
+ "startB": 1,
+ "lines": 40,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/13.1/compat-fix-typedefs-locations.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/bsd/freebsd/patches/14.2/compat-fix-typedefs-locations.patch",
+ "startB": 1,
+ "lines": 32,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tcl-modules/by-name/ti/tix/fix-clang16.patch",
+ "startA": 161,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tcl-modules/by-name/ti/tix/fix-clang16.patch",
+ "startB": 133,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/3.12/no-ldconfig.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/3.13/no-ldconfig.patch",
+ "startB": 1,
+ "lines": 51,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/3.11/no-ldconfig.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/3.13/no-ldconfig.patch",
+ "startB": 1,
+ "lines": 51,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/2.7/python-2.7-distutils-C++.patch",
+ "startA": 124,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/3.11/python-3.x-distutils-C++.patch",
+ "startB": 119,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/2.7/python-2.7-distutils-C++.patch",
+ "startA": 159,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/3.11/python-3.x-distutils-C++.patch",
+ "startB": 156,
+ "lines": 26,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/openjdk/11/patches/swing-use-gtk-jdk10.patch",
+ "startA": 3,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/openjdk/8/patches/swing-use-gtk-jdk8.patch",
+ "startB": 4,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/openjdk/11/patches/read-truststore-from-env-jdk10.patch",
+ "startA": 3,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/openjdk/8/patches/read-truststore-from-env-jdk8.patch",
+ "startB": 3,
+ "lines": 28,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/21/llvm/gnu-install-dirs.patch",
+ "startA": 79,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/22/llvm/gnu-install-dirs.patch",
+ "startB": 79,
+ "lines": 56,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/20/llvm/gnu-install-dirs.patch",
+ "startA": 41,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/21/llvm/gnu-install-dirs.patch",
+ "startB": 41,
+ "lines": 72,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/18/llvm/gnu-install-dirs.patch",
+ "startA": 38,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/21/llvm/gnu-install-dirs.patch",
+ "startB": 41,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/18/llvm/gnu-install-dirs.patch",
+ "startA": 100,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/20/llvm/gnu-install-dirs.patch",
+ "startB": 102,
+ "lines": 23,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/18/compiler-rt/armv6-scudo-libatomic.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/llvm/19/compiler-rt/armv6-scudo-libatomic.patch",
+ "startB": 1,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/dart/package-source-builders/flutter_discord_rpc/cargokit.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/dart/package-source-builders/rhttp/cargokit.patch",
+ "startB": 1,
+ "lines": 90,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py",
+ "startA": 61,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py",
+ "startB": 130,
+ "lines": 7,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py",
+ "startA": 95,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py",
+ "startB": 183,
+ "lines": 7,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py",
+ "startA": 121,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py",
+ "startB": 221,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py",
+ "startA": 182,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py",
+ "startB": 283,
+ "lines": 6,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/asciidoc.py",
+ "startA": 133,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py",
+ "startB": 104,
+ "lines": 6,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/asciidoc.py",
+ "startA": 202,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/html.py",
+ "startB": 171,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/split_paths_test.py",
+ "startA": 13,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/subcomponent_test.py",
+ "startB": 14,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/split_paths_test.py",
+ "startA": 138,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/subcomponent_test.py",
+ "startB": 149,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest_test.py",
+ "startA": 65,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/split_paths_test.py",
+ "startB": 29,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/php/builders/v2/hooks/php-script-utils.bash",
+ "startA": 59,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/php/builders/v2/hooks/php-script-utils.bash",
+ "startB": 32,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/php/builders/v2/hooks/composer-install-hook.sh",
+ "startA": 8,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/php/builders/v2/hooks/composer-vendor-hook.sh",
+ "startB": 8,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/php/builders/v1/hooks/composer-install-hook.sh",
+ "startA": 23,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/php/builders/v1/hooks/composer-repository-hook.sh",
+ "startB": 19,
+ "lines": 35,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/editors/jetbrains/updater/jetbrains_nix_updater/update_src.py",
+ "startA": 60,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/editors/jetbrains/updater/jetbrains_nix_updater/update_src.py",
+ "startB": 37,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 59,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 29,
+ "lines": 23,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 81,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 51,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 95,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 65,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 141,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 91,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 325,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 143,
+ "lines": 7,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 354,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 160,
+ "lines": 39,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 458,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 214,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 513,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 241,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 578,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 275,
+ "lines": 16,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/limine/limine-install.py",
+ "startA": 615,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/refind/refind-install.py",
+ "startB": 312,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/grub/install-grub.pl",
+ "startA": 595,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/grub/install-grub.pl",
+ "startB": 544,
+ "lines": 6,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-wrapped-settings-constructor-patched/main.c",
+ "startB": 5,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startA": 40,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 4,
+ "lines": 8,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startA": 64,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 52,
+ "lines": 8,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startA": 81,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 30,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startA": 92,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 29,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startA": 104,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 91,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched/main.c",
+ "startA": 4,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 4,
+ "lines": 74,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched/main.c",
+ "startA": 77,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 77,
+ "lines": 16,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched/main.c",
+ "startA": 92,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched-with-exists-fn/main.c",
+ "startB": 29,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched/main.c",
+ "startA": 105,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-hardcode-gsettings-patch/fixtures/example-project-patched/main.c",
+ "startB": 92,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/stdenv/linux/bootstrap-tools/glibc/unpack-bootstrap-tools.sh",
+ "startA": 56,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/stdenv/linux/bootstrap-tools/musl/unpack-bootstrap-tools.sh",
+ "startB": 43,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/sql/postgresql/patches/empty-pg-config-view-15+.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/sql/postgresql/patches/empty-pg-config-view.patch",
+ "startB": 1,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_8/oauth2-basic-secret-modify.patch",
+ "startA": 14,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_9/oauth2-basic-secret-modify.patch",
+ "startB": 14,
+ "lines": 54,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_8/oauth2-basic-secret-modify.patch",
+ "startA": 91,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_9/oauth2-basic-secret-modify.patch",
+ "startB": 91,
+ "lines": 41,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_8/oauth2-basic-secret-modify.patch",
+ "startA": 134,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_9/oauth2-basic-secret-modify.patch",
+ "startB": 134,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_7/oauth2-basic-secret-modify.patch",
+ "startA": 3,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_8/oauth2-basic-secret-modify.patch",
+ "startB": 3,
+ "lines": 65,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_7/oauth2-basic-secret-modify.patch",
+ "startA": 89,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_8/oauth2-basic-secret-modify.patch",
+ "startB": 89,
+ "lines": 43,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_7/oauth2-basic-secret-modify.patch",
+ "startA": 134,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_9/oauth2-basic-secret-modify.patch",
+ "startB": 134,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_6/recover-account.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_7/recover-account.patch",
+ "startB": 1,
+ "lines": 48,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_6/recover-account.patch",
+ "startA": 51,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_7/recover-account.patch",
+ "startB": 51,
+ "lines": 71,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_6/oauth2-basic-secret-modify.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_7/oauth2-basic-secret-modify.patch",
+ "startB": 1,
+ "lines": 158,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_5/oauth2-basic-secret-modify.patch",
+ "startA": 11,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_8/oauth2-basic-secret-modify.patch",
+ "startB": 11,
+ "lines": 57,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_5/oauth2-basic-secret-modify.patch",
+ "startA": 79,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/kanidm/provision-patches/1_7/oauth2-basic-secret-modify.patch",
+ "startB": 79,
+ "lines": 52,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/sgx/psw/disable-downloads.patch",
+ "startA": 14,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/sgx/sdk/disable-downloads.patch",
+ "startB": 12,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/sgx/psw/cppmicroservices-no-mtime.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/sgx/sdk/cppmicroservices-no-mtime.patch",
+ "startB": 1,
+ "lines": 26,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/minimal-bootstrap/python/no-ldconfig.patch",
+ "startA": 69,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/python/cpython/3.13/no-ldconfig.patch",
+ "startB": 14,
+ "lines": 38,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/minimal-bootstrap/gnumake/0001-No-impure-bin-sh.patch",
+ "startA": 4,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/build-managers/gnumake/patches/0001-No-impure-bin-sh.patch",
+ "startB": 4,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/libiconv/nixpkgs_test.c",
+ "startA": 50,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/libiconv/nixpkgs_test.c",
+ "startB": 17,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/libiconv/nixpkgs_test.c",
+ "startA": 59,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/libiconv/nixpkgs_test.c",
+ "startB": 26,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/libiconv/nixpkgs_test.c",
+ "startA": 92,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/darwin/apple-source-releases/libiconv/nixpkgs_test.c",
+ "startB": 26,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/misc/binutils/windres-locate-gcc.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/misc/binutils/2.38/windres-locate-gcc.patch",
+ "startB": 1,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/misc/binutils/always-search-rpath.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/misc/binutils/2.38/always-search-rpath.patch",
+ "startB": 1,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/misc/binutils/0001-Revert-libtool.m4-fix-nm-BSD-flag-detection.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/misc/binutils/2.38/0001-Revert-libtool.m4-fix-nm-BSD-flag-detection.patch",
+ "startB": 1,
+ "lines": 136,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/electron/binary/update.py",
+ "startA": 121,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/electron/binary/update.py",
+ "startB": 97,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/electron/binary/update.py",
+ "startA": 136,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/tools/electron/binary/update.py",
+ "startB": 112,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/torch/bin/prefetch.sh",
+ "startA": 29,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/science/math/libtorch/prefetch.sh",
+ "startB": 15,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/fix-qt-builtin-paths.sh",
+ "startA": 40,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/fix-qt-builtin-paths.sh",
+ "startB": 16,
+ "lines": 21,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/wrap-qt-apps-hook.sh",
+ "startA": 36,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/wrap-qt-apps-hook.sh",
+ "startB": 36,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/wrap-qt-apps-hook.sh",
+ "startA": 77,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/wrap-qt-apps-hook.sh",
+ "startB": 76,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/qmake-hook.sh",
+ "startA": 22,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/qmake-hook.sh",
+ "startB": 15,
+ "lines": 29,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/fix-qt-module-paths.sh",
+ "startA": 10,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/fix-qt-module-paths.sh",
+ "startB": 8,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/fix-qt-builtin-paths.sh",
+ "startA": 12,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/fix-qt-builtin-paths.sh",
+ "startB": 9,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/fix-qt-builtin-paths.sh",
+ "startA": 42,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/fix-qt-builtin-paths.sh",
+ "startB": 19,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-5/hooks/fix-qmake-libtool.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/qt-6/hooks/fix-qmake-libtool.sh",
+ "startB": 1,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/update/update.py",
+ "startA": 65,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/update/update.py",
+ "startB": 40,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/update/update.py",
+ "startA": 138,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/flutter/update/update.py",
+ "startB": 110,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ya/yarn-berry/fetcher/berry-3-offline.patch",
+ "startA": 84,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ya/yarn-berry/fetcher/berry-4-offline.patch",
+ "startB": 80,
+ "lines": 29,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gt/gtk3/hooks/drop-icon-theme-cache.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gt/gtk4/hooks/drop-icon-theme-cache.sh",
+ "startB": 1,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gt/gtk2/hooks/drop-icon-theme-cache.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gt/gtk4/hooks/drop-icon-theme-cache.sh",
+ "startB": 1,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/cn/cnijfilter_4_00/patches/cnijfilter-3.80-1-cups-1.6.patch",
+ "startA": 15,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/cn/cnijfilter_4_00/patches/cnijfilter-3.80-6-cups-1.6.patch",
+ "startB": 40,
+ "lines": 40,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/node/prefetch-npm-deps/src/main.rs",
+ "startA": 617,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/node/prefetch-npm-deps/src/main.rs",
+ "startB": 544,
+ "lines": 27,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/dotnet/build-dotnet-module/hook/dotnet-hook.sh",
+ "startA": 366,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/dotnet/build-dotnet-module/hook/dotnet-hook.sh",
+ "startB": 112,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/dotnet/build-dotnet-module/hook/dotnet-hook.sh",
+ "startA": 393,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/dotnet/build-dotnet-module/hook/dotnet-hook.sh",
+ "startB": 112,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/science/logic/satallax/minisat-fenv.patch",
+ "startA": 26,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/science/logic/satallax/minisat-fenv.patch",
+ "startB": 5,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/lib/test-driver/src/test_driver/logger.py",
+ "startA": 269,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/lib/test-driver/src/test_driver/logger.py",
+ "startB": 216,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/lib/test-driver/src/test_driver/logger.py",
+ "startA": 309,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/lib/test-driver/src/test_driver/logger.py",
+ "startB": 200,
+ "lines": 8,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/diffutils/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 18,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 18,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/diffutils/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 30,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 30,
+ "lines": 111,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/diffutils/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 154,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 154,
+ "lines": 77,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/misc/findutils/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 32,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 31,
+ "lines": 65,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/misc/findutils/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 167,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 165,
+ "lines": 44,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-binary-wrapper/overlength-strings/overlength-strings.c",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-binary-wrapper/prefix/prefix.c",
+ "startB": 1,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-binary-wrapper/combination/combination.c",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-binary-wrapper/prefix/prefix.c",
+ "startB": 1,
+ "lines": 21,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-binary-wrapper/combination/combination.c",
+ "startA": 21,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/make-binary-wrapper/suffix/suffix.c",
+ "startB": 9,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/dict/wiktionary/wiktionary2dict.py",
+ "startA": 168,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/dict/wiktionary/wiktionary2dict.py",
+ "startB": 153,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/busybox/busybox-in-store.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/os-specific/linux/minimal-bootstrap/busybox/busybox-in-store.patch",
+ "startB": 1,
+ "lines": 23,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/kde/plasma/kwin/0001-NixOS-Unwrap-executable-name-for-.desktop-search.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/kde/plasma/kwin-x11/0001-NixOS-Unwrap-executable-name-for-.desktop-search.patch",
+ "startB": 1,
+ "lines": 114,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/kde/frameworks/extra-cmake-modules/ecm-hook.sh",
+ "startA": 72,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/kde-frameworks/extra-cmake-modules/setup-hook.sh",
+ "startB": 70,
+ "lines": 36,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/web/nodejs/gyp-patches-set-fallback-value-for-CLT-darwin.patch",
+ "startA": 39,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/web/nodejs/gyp-patches-set-fallback-value-for-CLT-darwin.patch",
+ "startB": 9,
+ "lines": 26,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/triton/prefetch.sh",
+ "startA": 21,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/science/math/libtorch/prefetch.sh",
+ "startB": 15,
+ "lines": 24,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/torchvision/prefetch.sh",
+ "startA": 28,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/science/math/libtorch/prefetch.sh",
+ "startB": 15,
+ "lines": 24,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/torchaudio/prefetch.sh",
+ "startA": 32,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/science/math/libtorch/prefetch.sh",
+ "startB": 18,
+ "lines": 21,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/pygame-ce/fix-dependency-finding.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/pygame-original/fix-dependency-finding.patch",
+ "startB": 1,
+ "lines": 41,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/pgpy/Fix-compat-with-current-cryptography.patch",
+ "startA": 16,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/pgpy-dtc/Fix-compat-with-current-cryptography.patch",
+ "startB": 4,
+ "lines": 31,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/ocrmypdf/use-pillow-heif.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/ocrmypdf_16/use-pillow-heif.patch",
+ "startB": 1,
+ "lines": 26,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/ocrmypdf/paths.patch",
+ "startA": 30,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/ocrmypdf_16/paths.patch",
+ "startB": 39,
+ "lines": 30,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/jupytext/fix-yarn-lock-typescript-offline-cache.patch",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/python-modules/jupytext/fix-yarn-lock-typescript.patch",
+ "startB": 5,
+ "lines": 54,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startA": 391,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startB": 349,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startA": 399,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startB": 312,
+ "lines": 8,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startA": 425,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startB": 325,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startA": 429,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/mobile/androidenv/update.rb",
+ "startB": 371,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startA": 169,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startB": 126,
+ "lines": 44,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startA": 308,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startB": 276,
+ "lines": 33,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startA": 340,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startB": 276,
+ "lines": 33,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startA": 472,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/tpm2-tss/no-dynamic-loader-path.patch",
+ "startB": 438,
+ "lines": 33,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/intel-oneapi/test.c",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/mk/mkl/test/test.c",
+ "startB": 5,
+ "lines": 8,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-4.8.patch",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-5.3.patch",
+ "startB": 5,
+ "lines": 37,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-4.8.patch",
+ "startA": 67,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-5.3.patch",
+ "startB": 67,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-4.8.patch",
+ "startA": 133,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-5.3.patch",
+ "startB": 133,
+ "lines": 21,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-4.8.patch",
+ "startA": 192,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/db/clang-5.3.patch",
+ "startB": 226,
+ "lines": 24,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-187.patch",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-189.patch",
+ "startB": 5,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-187.patch",
+ "startA": 20,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-189.patch",
+ "startB": 20,
+ "lines": 107,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-187.patch",
+ "startA": 161,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-189.patch",
+ "startB": 161,
+ "lines": 32,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-187.patch",
+ "startA": 192,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-189.patch",
+ "startB": 192,
+ "lines": 369,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-187.patch",
+ "startA": 598,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-189.patch",
+ "startB": 598,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-187.patch",
+ "startA": 646,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/boost/Fix-cygwin-build-189.patch",
+ "startB": 633,
+ "lines": 49,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/julia-modules/python/minimal_registry.py",
+ "startA": 17,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/julia-modules/python/project.py",
+ "startB": 12,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/julia-modules/python/dedup_overrides.py",
+ "startA": 2,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/julia-modules/python/format_overrides.py",
+ "startB": 2,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/spidermonkey/allow-system-s-nspr-and-icu-on-bootstrapped-sysroot-128.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/interpreters/spidermonkey/allow-system-s-nspr-and-icu-on-bootstrapped-sysroot.patch",
+ "startB": 1,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/squeak/squeak-squeaksh-nixpkgs.patch",
+ "startA": 29,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/squeak/squeak-squeaksh-nixpkgs.patch",
+ "startB": 9,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/semeru-bin/generate-sources.py",
+ "startA": 76,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/temurin-bin/generate-sources.py",
+ "startB": 64,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/sbcl/dynamic-space-size-envvar-2.5.2-tests.patch",
+ "startA": 57,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/sbcl/dynamic-space-size-envvar-2.5.3-tests.patch",
+ "startB": 96,
+ "lines": 17,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/sbcl/dynamic-space-size-envvar-2.5.2-feature.patch",
+ "startA": 15,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/sbcl/dynamic-space-size-envvar-2.5.3-feature.patch",
+ "startB": 14,
+ "lines": 31,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/ghc-9.4-llvm-use-new-pass-manager.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/ghc-9.6-llvm-use-new-pass-manager.patch",
+ "startB": 1,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/ghc-9.4-llvm-use-new-pass-manager.patch",
+ "startA": 56,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/ghc-9.6-llvm-use-new-pass-manager.patch",
+ "startB": 56,
+ "lines": 31,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/ghc-9.4-docs-sphinx-9.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/ghc-9.6-or-later-docs-sphinx-9.patch",
+ "startB": 1,
+ "lines": 17,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.16-paths-fix-cycle-aarch64-darwin.patch",
+ "startA": 30,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-at-least-3.6-paths-fix-cycle-aarch64-darwin.patch",
+ "startB": 36,
+ "lines": 63,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.16-paths-fix-cycle-aarch64-darwin.patch",
+ "startA": 150,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-at-least-3.6-paths-fix-cycle-aarch64-darwin.patch",
+ "startB": 156,
+ "lines": 54,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.12-paths-fix-cycle-aarch64-darwin.patch",
+ "startA": 18,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.16-paths-fix-cycle-aarch64-darwin.patch",
+ "startB": 18,
+ "lines": 89,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.12-paths-fix-cycle-aarch64-darwin.patch",
+ "startA": 148,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-at-least-3.6-paths-fix-cycle-aarch64-darwin.patch",
+ "startB": 154,
+ "lines": 56,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.12-paths-fix-cycle-aarch64-darwin.patch",
+ "startA": 233,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-at-least-3.6-paths-fix-cycle-aarch64-darwin.patch",
+ "startB": 239,
+ "lines": 39,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.12-paths-fix-cycle-aarch64-darwin.patch",
+ "startA": 271,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-at-least-3.6-paths-fix-cycle-aarch64-darwin.patch",
+ "startB": 277,
+ "lines": 152,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-3.12-paths-fix-cycle-aarch64-darwin.patch",
+ "startA": 496,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/ghc/Cabal-at-least-3.6-paths-fix-cycle-aarch64-darwin.patch",
+ "startB": 502,
+ "lines": 101,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/dotnet/vmr-compiler-opt-v8.patch",
+ "startA": 25,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/dotnet/vmr-compiler-opt-v9.patch",
+ "startB": 211,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/corretto/corretto17-gradle8.patch",
+ "startA": 110,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/corretto/corretto21-gradle8.patch",
+ "startB": 128,
+ "lines": 40,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/corretto/corretto11-gradle8.patch",
+ "startA": 127,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/corretto/corretto21-gradle8.patch",
+ "startB": 116,
+ "lines": 52,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/corretto/corretto11-gradle8.patch",
+ "startA": 197,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/corretto/corretto21-gradle8.patch",
+ "startB": 202,
+ "lines": 48,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/wi/windmill/run.go.config.proto.patch",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/wi/windmill/run.rust.config.proto.patch",
+ "startB": 5,
+ "lines": 33,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/sw/sway-unwrapped/load-configuration-from-etc.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/sw/swayfx-unwrapped/load-configuration-from-etc.patch",
+ "startB": 1,
+ "lines": 48,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/su/super-slicer/0001-fix-assertion-using-hide-in-destroy.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/wx/wxGTK31/0001-fix-assertion-using-hide-in-destroy.patch",
+ "startB": 1,
+ "lines": 42,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/se/serious-sam-classic/tfe-force-using-system-path.patch",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/se/serious-sam-classic/tse-force-using-system-path.patch",
+ "startB": 5,
+ "lines": 56,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ru/rustdesk-flutter/build-runner.sh",
+ "startA": 2,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/dart/build-dart-application/hooks/dart-config-hook.sh",
+ "startB": 17,
+ "lines": 41,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/rs/rss2email/html2text-2025.4.15-compat.patch",
+ "startA": 228,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/rs/rss2email/html2text-2025.4.15-compat.patch",
+ "startB": 112,
+ "lines": 23,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/re/renameutils/install-exec.patch",
+ "startA": 17,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/re/renameutils/install-exec.patch",
+ "startB": 5,
+ "lines": 8,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ra/radicle-ci-broker/update.sh",
+ "startA": 9,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ra/radicle-native-ci/update.sh",
+ "startB": 9,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ra/radarr/update.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 1,
+ "lines": 46,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ra/radarr/update.py",
+ "startA": 46,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 46,
+ "lines": 27,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ra/radarr/update.py",
+ "startA": 77,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 76,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ra/radarr/update.py",
+ "startA": 91,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 90,
+ "lines": 92,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pr/prowlarr/update.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 1,
+ "lines": 46,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pr/prowlarr/update.py",
+ "startA": 46,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 46,
+ "lines": 27,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pr/prowlarr/update.py",
+ "startA": 77,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 76,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pr/prowlarr/update.py",
+ "startA": 91,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 90,
+ "lines": 92,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pl/plex-desktop/update.sh",
+ "startA": 29,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pl/plex-htpc/update.sh",
+ "startB": 29,
+ "lines": 16,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pl/plex-desktop/update.sh",
+ "startA": 45,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pl/plex-htpc/update.sh",
+ "startB": 45,
+ "lines": 26,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pl/platformsh/update.sh",
+ "startA": 12,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/up/upsun/update.sh",
+ "startB": 12,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pa/passage/set-correct-program-name-for-sleep.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/security/pass/set-correct-program-name-for-sleep.patch",
+ "startB": 1,
+ "lines": 49,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pa/pagefind/cargo-lock.patch",
+ "startA": 285,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pa/pagefind/web-cargo-lock.patch",
+ "startB": 3,
+ "lines": 83,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ov/oven-media-engine/support-ffmpeg-7.patch",
+ "startA": 352,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ov/oven-media-engine/support-ffmpeg-7.patch",
+ "startB": 168,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ov/oven-media-engine/support-ffmpeg-7.patch",
+ "startA": 376,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ov/oven-media-engine/support-ffmpeg-7.patch",
+ "startB": 168,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/op/openutau/update.sh",
+ "startA": 4,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonic-pi/update.sh",
+ "startB": 4,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/op/openutau/update.sh",
+ "startA": 18,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonic-pi/update.sh",
+ "startB": 18,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/od/odoo17/update.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/od/odoo18/update.sh",
+ "startB": 1,
+ "lines": 32,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/od/odoo/update.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/od/odoo18/update.sh",
+ "startB": 1,
+ "lines": 32,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/oc/ocis_5-bin/update.py",
+ "startA": 129,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ro/roon-server/update.py",
+ "startB": 58,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nim-unwrapped-1_0/extra-mangling.patch",
+ "startA": 23,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ni/nim-unwrapped-2_2/extra-mangling-2.patch",
+ "startB": 40,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/nd/ndi/update.py",
+ "startA": 25,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/nd/ndi-6/update.py",
+ "startB": 25,
+ "lines": 53,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ma/makeDBusConf/make-system-conf.xsl",
+ "startA": 9,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/fontconfig/make-fonts-conf.xsl",
+ "startB": 9,
+ "lines": 7,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ma/makeDBusConf/make-session-conf.xsl",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ma/makeDBusConf/make-system-conf.xsl",
+ "startB": 1,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ma/mailpit/update.sh",
+ "startA": 31,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pr/prometheus/update.sh",
+ "startB": 35,
+ "lines": 7,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ma/mailpit/update.sh",
+ "startA": 53,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pr/prometheus/update.sh",
+ "startB": 57,
+ "lines": 21,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lightway/backport-darwin-address-calc-fix.patch",
+ "startA": 69,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lightway/backport-darwin-address-calc-fix.patch",
+ "startB": 19,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lightway/backport-darwin-address-calc-fix.patch",
+ "startA": 94,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lightway/backport-darwin-address-calc-fix.patch",
+ "startB": 44,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lidarr/update.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 1,
+ "lines": 46,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lidarr/update.py",
+ "startA": 46,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 46,
+ "lines": 27,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lidarr/update.py",
+ "startA": 76,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 76,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/lidarr/update.py",
+ "startA": 90,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonarr/update.py",
+ "startB": 90,
+ "lines": 92,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libtins/cmake-3.10.patch",
+ "startA": 3,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/xs/xss-lock/cmake-3.10.patch",
+ "startB": 3,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libredirect/libredirect.c",
+ "startA": 135,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libredirect/libredirect.c",
+ "startB": 114,
+ "lines": 16,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libproxy/hardcode-gsettings.patch",
+ "startA": 79,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libproxy/hardcode-gsettings.patch",
+ "startB": 9,
+ "lines": 62,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libfprint-2-tod1-broadcom/wrapper-lib.c",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libfprint-2-tod1-broadcom-cv3plus/wrapper-lib.c",
+ "startB": 5,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libad9361/cmake-3.10.patch",
+ "startA": 3,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/xs/xss-lock/cmake-3.10.patch",
+ "startB": 3,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ko/kodelife/update.sh",
+ "startA": 9,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/to/touchosc/update.sh",
+ "startB": 9,
+ "lines": 46,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ke/keepass/extractWinRscIconsToStdFreeDesktopDir.sh",
+ "startA": 41,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ic/iconConvTools/bin/extractWinRscIconsToStdFreeDesktopDir.sh",
+ "startB": 50,
+ "lines": 21,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ju/junicode/test-vf.tex",
+ "startA": 3,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ju/junicode/test.tex",
+ "startB": 3,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ju/junicode/test-vf.tex",
+ "startA": 30,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ju/junicode/test.tex",
+ "startB": 30,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/hq/hqplayerd/add-option-for-installation-sysconfdir.patch",
+ "startA": 16,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ry/rygel/add-option-for-installation-sysconfdir.patch",
+ "startB": 16,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gn/gnutls/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 32,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 30,
+ "lines": 110,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gn/gnutls/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 156,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 30,
+ "lines": 110,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gn/gnutls/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startA": 280,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/tools/text/gnugrep/gnulib-float-h-tests-port-to-C23-PowerPC-GCC.patch",
+ "startB": 154,
+ "lines": 64,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gn/gnome-shell-extensions/fix_gtop.patch",
+ "startA": 5,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/desktops/gnome/extensions/extensionOverridesPatches/system-monitor_at_gnome-shell-extensions.gcampax.github.com.patch",
+ "startB": 5,
+ "lines": 21,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gn/gnome-settings-daemon/add-gnome-session-ctl-option.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gn/gnome-settings-daemon48/add-gnome-session-ctl-option.patch",
+ "startB": 1,
+ "lines": 58,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gi/gitbeaker-cli/update.sh",
+ "startA": 15,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/up/uppy-companion/update.sh",
+ "startB": 14,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gi/gildas/clang.patch",
+ "startA": 2,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/im/imager/clang.patch",
+ "startB": 2,
+ "lines": 23,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gc/gclient2nix/gclient2nix.py",
+ "startA": 56,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/networking/browsers/chromium/depot_tools.py",
+ "startB": 39,
+ "lines": 27,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/gc/gclient2nix/gclient2nix.py",
+ "startA": 110,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/networking/browsers/chromium/depot_tools.py",
+ "startB": 69,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/et/etcd_3_5/update.sh",
+ "startA": 9,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/et/etcd_3_6/update.sh",
+ "startB": 9,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/et/etcd_3_5/update.sh",
+ "startA": 30,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/et/etcd_3_6/update.sh",
+ "startB": 30,
+ "lines": 45,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/cu/curl-impersonate/update.sh",
+ "startA": 21,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/so/sonic-pi/update.sh",
+ "startB": 17,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/cu/curl-impersonate/update.sh",
+ "startA": 54,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/op/openutau/update.sh",
+ "startB": 32,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/cu/cuneiform/gcc14-fix.patch",
+ "startA": 136,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/cu/cuneiform/gcc14-fix.patch",
+ "startB": 17,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ca/catppuccin-kde/color.sh",
+ "startA": 16,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ca/catppuccin-kde/color.sh",
+ "startB": 7,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/br/brioche/update-librusty.sh",
+ "startA": 12,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/wi/windmill/update-librusty.sh",
+ "startB": 12,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/br/brioche/update-librusty.sh",
+ "startA": 20,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/wi/windmill/update-librusty.sh",
+ "startB": 20,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ba/bazel_7/trim-last-argument-to-gcc-if-empty.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ba/bazel_8/patches/trim-last-argument-to-gcc-if-empty.patch",
+ "startB": 1,
+ "lines": 36,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ba/bazel_7/strict_action_env.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ba/bazel_8/patches/strict_action_env.patch",
+ "startB": 1,
+ "lines": 13,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ba/bazel_7/darwin_sleep.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ba/bazel_8/patches/darwin_sleep.patch",
+ "startB": 1,
+ "lines": 56,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/av/avy/minisat-fenv.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/science/logic/satallax/minisat-fenv.patch",
+ "startB": 1,
+ "lines": 57,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ar/ardour/default-plugin-search-paths.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ar/ardour_8/default-plugin-search-paths.patch",
+ "startB": 1,
+ "lines": 54,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/al/althttpd/update.sh",
+ "startA": 15,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/pi/pikchr/update.sh",
+ "startB": 15,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/setup-hooks/wrap-gapps-hook/wrap-gapps-hook.sh",
+ "startA": 51,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libcosmicAppHook/libcosmic-app-hook.sh",
+ "startB": 58,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/setup-hooks/wrap-gapps-hook/wrap-gapps-hook.sh",
+ "startA": 64,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/li/libcosmicAppHook/libcosmic-app-hook.sh",
+ "startB": 71,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/node/fetch-yarn-deps/yarn-config-hook.sh",
+ "startA": 10,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ya/yarn-berry/fetcher/yarn-berry-config-hook.sh",
+ "startB": 10,
+ "lines": 36,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/node/fetch-yarn-deps/fixup.js",
+ "startA": 60,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/node/fetch-yarn-deps/index.js",
+ "startB": 186,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-fresh.patch",
+ "startA": 56,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-still.patch",
+ "startB": 118,
+ "lines": 51,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-collabora.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-fresh.patch",
+ "startB": 1,
+ "lines": 28,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-collabora.patch",
+ "startA": 76,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-fresh.patch",
+ "startB": 41,
+ "lines": 66,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-collabora.patch",
+ "startA": 157,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/office/libreoffice/skip-broken-tests-fresh.patch",
+ "startB": 135,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/gis/qgis/set-pyqt-package-dirs-ltr.patch",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/applications/gis/qgis/set-pyqt-package-dirs.patch",
+ "startB": 1,
+ "lines": 49,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/installer/tools/nixos-generate-config.pl",
+ "startA": 408,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/nixos/modules/system/boot/loader/grub/install-grub.pl",
+ "startB": 140,
+ "lines": 8,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startA": 58,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startB": 16,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startA": 71,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startB": 47,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startA": 156,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startB": 116,
+ "lines": 10,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/auto-patchelf-hook-preserve-origin/lib-main.c",
+ "startA": 3,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/test/stdenv-inputs/lib-main.c",
+ "startB": 3,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/stdenv/cygwin/rebase-i686.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/stdenv/cygwin/rebase-x86_64.sh",
+ "startB": 1,
+ "lines": 11,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/stdenv/cygwin/rebase-i686.sh",
+ "startA": 13,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/stdenv/cygwin/rebase-x86_64.sh",
+ "startB": 13,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/shells/bash/update-patch-set.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/libraries/readline/update-patch-set.sh",
+ "startB": 1,
+ "lines": 54,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/home-assistant/update.py",
+ "startA": 24,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/mu/music-assistant/update-providers.py",
+ "startB": 68,
+ "lines": 25,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/home-assistant/update.py",
+ "startA": 116,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/mu/music-assistant/update-providers.py",
+ "startB": 96,
+ "lines": 22,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/servers/home-assistant/update-component-packages.py",
+ "startA": 168,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ap/apache-airflow/update-providers.py",
+ "startB": 56,
+ "lines": 18,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/perl-modules/net-snmp-add-sha-algorithms.patch",
+ "startA": 485,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/perl-modules/net-snmp-add-sha-algorithms.patch",
+ "startB": 310,
+ "lines": 62,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/perl-modules/net-snmp-add-sha-algorithms.patch",
+ "startA": 660,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/perl-modules/net-snmp-add-sha-algorithms.patch",
+ "startB": 310,
+ "lines": 62,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/perl-modules/net-snmp-add-sha-algorithms.patch",
+ "startA": 835,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/perl-modules/net-snmp-add-sha-algorithms.patch",
+ "startB": 310,
+ "lines": 62,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/setup-hooks/role.bash",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/ap/apple-sdk/setup-hooks/role.bash",
+ "startB": 1,
+ "lines": 71,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/references-by-popularity/closure-graph.py",
+ "startA": 13,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest.py",
+ "startB": 7,
+ "lines": 101,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/references-by-popularity/closure-graph.py",
+ "startA": 143,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest_test.py",
+ "startB": 113,
+ "lines": 29,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/references-by-popularity/closure-graph.py",
+ "startA": 178,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest_test.py",
+ "startB": 141,
+ "lines": 32,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/references-by-popularity/closure-graph.py",
+ "startA": 215,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest_test.py",
+ "startB": 171,
+ "lines": 46,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/references-by-popularity/closure-graph.py",
+ "startA": 363,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest_test.py",
+ "startB": 272,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/references-by-popularity/closure-graph.py",
+ "startA": 456,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest.py",
+ "startB": 280,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/references-by-popularity/closure-graph.py",
+ "startA": 484,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/by-name/fl/flatten-references-graph/src/flatten_references_graph/popularity_contest_test.py",
+ "startB": 317,
+ "lines": 19,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/cc-wrapper/cc-wrapper.sh",
+ "startA": 89,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/swift/wrapper/wrapper.sh",
+ "startB": 122,
+ "lines": 15,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/cc-wrapper/cc-wrapper.sh",
+ "startA": 106,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/development/compilers/swift/wrapper/wrapper.sh",
+ "startB": 139,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/cc-wrapper/cc-wrapper.sh",
+ "startA": 248,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/cc-wrapper/gnat-wrapper.sh",
+ "startB": 164,
+ "lines": 17,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/bintools-wrapper/ld-wrapper.sh",
+ "startA": 268,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/cc-wrapper/gnat-wrapper.sh",
+ "startB": 169,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/bintools-wrapper/darwin-strip-wrapper.sh",
+ "startA": 54,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/bintools-wrapper/ld-wrapper.sh",
+ "startB": 265,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/bintools-wrapper/darwin-install_name_tool-wrapper.sh",
+ "startA": 30,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/bintools-wrapper/darwin-strip-wrapper.sh",
+ "startB": 51,
+ "lines": 20,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/bintools-wrapper/add-hardening.sh",
+ "startA": 24,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/pkgs/build-support/cc-wrapper/add-hardening.sh",
+ "startB": 77,
+ "lines": 16,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/maintainers/scripts/kde/collect-metadata.py",
+ "startA": 18,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/maintainers/scripts/kde/generate-sources.py",
+ "startB": 58,
+ "lines": 14,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/systematic-debugging/condition-based-waiting-example.ts",
+ "startA": 115,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/systematic-debugging/condition-based-waiting-example.ts",
+ "startB": 23,
+ "lines": 9,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/ci/github-script/reviews.js",
+ "startA": 154,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/ci/github-script/reviews.js",
+ "startB": 28,
+ "lines": 12,
+ "tokens": 0
+ },
+ {
+ "fileA": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/ci/nixpkgs-vet.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/ky7q5vm0kgv6qbq5gfzmv1ki79qr4q0z-source/maintainers/scripts/check-by-name.sh",
+ "startB": 1,
+ "lines": 66,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/skill-creator/scripts/quick_validate.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/skill-creator/scripts/quick_validate.py",
+ "startB": 1,
+ "lines": 103,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/skill-creator/scripts/init_skill.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/skill-creator/scripts/init_skill.py",
+ "startB": 1,
+ "lines": 304,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/prompt-engineering-patterns/scripts/optimize-prompt.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/prompt-engineering-patterns/scripts/optimize-prompt.py",
+ "startB": 1,
+ "lines": 279,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/pdf/scripts/fill_pdf_form_with_annotations.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/fill_pdf_form_with_annotations.py",
+ "startB": 1,
+ "lines": 108,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/pdf/scripts/fill_fillable_fields.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/fill_fillable_fields.py",
+ "startB": 1,
+ "lines": 114,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/pdf/scripts/extract_form_field_info.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/extract_form_field_info.py",
+ "startB": 1,
+ "lines": 152,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/pdf/scripts/create_validation_image.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/create_validation_image.py",
+ "startB": 1,
+ "lines": 41,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/pdf/scripts/convert_pdf_to_images.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/convert_pdf_to_images.py",
+ "startB": 1,
+ "lines": 35,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes_test.py",
+ "startB": 1,
+ "lines": 226,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/pdf/scripts/check_bounding_boxes.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/pdf/scripts/check_bounding_boxes.py",
+ "startB": 1,
+ "lines": 70,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/excalidraw/references/render_template.html",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/excalidraw/references/render_template.html",
+ "startB": 1,
+ "lines": 57,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/excalidraw/references/render_excalidraw.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/excalidraw/references/render_excalidraw.py",
+ "startB": 1,
+ "lines": 205,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/doc-translator/scripts/upload_image_to_outline.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/doc-translator/scripts/upload_image_to_outline.sh",
+ "startB": 1,
+ "lines": 116,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/agent-development/scripts/validate-agent.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/agent-development/scripts/validate-agent.sh",
+ "startB": 1,
+ "lines": 304,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/xlsx/recalc.py",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/xlsx/recalc.py",
+ "startB": 1,
+ "lines": 178,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/systematic-debugging/find-polluter.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/systematic-debugging/find-polluter.sh",
+ "startB": 1,
+ "lines": 63,
+ "tokens": 0
+ },
+ {
+ "fileA": "skills/systematic-debugging/condition-based-waiting-example.ts",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/skills/systematic-debugging/condition-based-waiting-example.ts",
+ "startB": 1,
+ "lines": 158,
+ "tokens": 0
+ },
+ {
+ "fileA": "scripts/validate-agents.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/scripts/validate-agents.sh",
+ "startB": 1,
+ "lines": 182,
+ "tokens": 0
+ },
+ {
+ "fileA": "scripts/test-skill.sh",
+ "startA": 1,
+ "fileB": ".direnv/flake-inputs/s8m7zj41bywjaks2s8cgwr3jbzgmh2y3-source/scripts/test-skill.sh",
+ "startB": 1,
+ "lines": 225,
+ "tokens": 0
+ }
+ ],
+ "duplicatedLines": 13471,
+ "totalLines": 351417,
+ "percentage": 3.83
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/jscpd.meta.json b/.pi-lens/cache/jscpd.meta.json
new file mode 100644
index 0000000..f7fd076
--- /dev/null
+++ b/.pi-lens/cache/jscpd.meta.json
@@ -0,0 +1,3 @@
+{
+ "timestamp": "2026-04-11T03:55:48.815Z"
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/knip.json b/.pi-lens/cache/knip.json
new file mode 100644
index 0000000..a4147c6
--- /dev/null
+++ b/.pi-lens/cache/knip.json
@@ -0,0 +1,9 @@
+{
+ "success": false,
+ "issues": [],
+ "unusedExports": [],
+ "unusedFiles": [],
+ "unusedDeps": [],
+ "unlistedDeps": [],
+ "summary": "Failed to parse output"
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/knip.meta.json b/.pi-lens/cache/knip.meta.json
new file mode 100644
index 0000000..564c7e2
--- /dev/null
+++ b/.pi-lens/cache/knip.meta.json
@@ -0,0 +1,3 @@
+{
+ "timestamp": "2026-04-11T03:55:49.603Z"
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/session-start-guidance.json b/.pi-lens/cache/session-start-guidance.json
new file mode 100644
index 0000000..ec747fa
--- /dev/null
+++ b/.pi-lens/cache/session-start-guidance.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/.pi-lens/cache/session-start-guidance.meta.json b/.pi-lens/cache/session-start-guidance.meta.json
new file mode 100644
index 0000000..367614f
--- /dev/null
+++ b/.pi-lens/cache/session-start-guidance.meta.json
@@ -0,0 +1,3 @@
+{
+ "timestamp": "2026-04-11T04:23:19.016Z"
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/todo-baseline.json b/.pi-lens/cache/todo-baseline.json
new file mode 100644
index 0000000..50015ca
--- /dev/null
+++ b/.pi-lens/cache/todo-baseline.json
@@ -0,0 +1,18 @@
+{
+ "items": [
+ {
+ "type": "TODO",
+ "message": "Replace with the first main section based on chosen structure]",
+ "file": "skills/skill-creator/scripts/init_skill.py",
+ "line": 58,
+ "column": 4
+ },
+ {
+ "type": "TODO",
+ "message": "Add actual script logic here",
+ "file": "skills/skill-creator/scripts/init_skill.py",
+ "line": 120,
+ "column": 6
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/todo-baseline.meta.json b/.pi-lens/cache/todo-baseline.meta.json
new file mode 100644
index 0000000..a02d4ee
--- /dev/null
+++ b/.pi-lens/cache/todo-baseline.meta.json
@@ -0,0 +1,3 @@
+{
+ "timestamp": "2026-04-11T04:22:20.339Z"
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/turn-end-findings-last.json b/.pi-lens/cache/turn-end-findings-last.json
new file mode 100644
index 0000000..a92b132
--- /dev/null
+++ b/.pi-lens/cache/turn-end-findings-last.json
@@ -0,0 +1,3 @@
+{
+ "signature": "/home/m3tam3re/p/NIX/nixpkgs/modules/home-manager/coding/agents/claude-code.nix::📐 Cascade errors in 1 other file(s) — fix before finishing turn:\n\n line 205, col 10 code=sema-duplicated-attrname: duplicated attrname `file`\n"
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/turn-end-findings-last.meta.json b/.pi-lens/cache/turn-end-findings-last.meta.json
new file mode 100644
index 0000000..f07444c
--- /dev/null
+++ b/.pi-lens/cache/turn-end-findings-last.meta.json
@@ -0,0 +1,3 @@
+{
+ "timestamp": "2026-04-11T03:32:45.214Z"
+}
\ No newline at end of file
diff --git a/.pi-lens/cache/turn-end-findings.json b/.pi-lens/cache/turn-end-findings.json
new file mode 100644
index 0000000..ec747fa
--- /dev/null
+++ b/.pi-lens/cache/turn-end-findings.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/.pi-lens/cache/turn-end-findings.meta.json b/.pi-lens/cache/turn-end-findings.meta.json
new file mode 100644
index 0000000..82429b5
--- /dev/null
+++ b/.pi-lens/cache/turn-end-findings.meta.json
@@ -0,0 +1,3 @@
+{
+ "timestamp": "2026-04-11T03:33:08.875Z"
+}
\ No newline at end of file
diff --git a/.pi-lens/turn-state.json b/.pi-lens/turn-state.json
new file mode 100644
index 0000000..b0d281f
--- /dev/null
+++ b/.pi-lens/turn-state.json
@@ -0,0 +1,6 @@
+{
+ "files": {},
+ "turnCycles": 0,
+ "maxCycles": 3,
+ "lastUpdated": "2026-04-11T03:55:49.605Z"
+}
\ No newline at end of file
diff --git a/.sisyphus/boulder.json b/.sisyphus/boulder.json
new file mode 100644
index 0000000..51a1701
--- /dev/null
+++ b/.sisyphus/boulder.json
@@ -0,0 +1,77 @@
+{
+ "active_plan": "/home/m3tam3re/p/AI/AGENTS/.sisyphus/plans/harness-agnostic-migration.md",
+ "started_at": "2026-04-10T13:53:14.227Z",
+ "session_ids": [
+ "ses_28877ce54ffepCBENqWBi0fQr4"
+ ],
+ "session_origins": {
+ "ses_28877ce54ffepCBENqWBi0fQr4": "direct"
+ },
+ "plan_name": "harness-agnostic-migration",
+ "agent": "atlas",
+ "task_sessions": {
+ "todo:1": {
+ "task_key": "todo:1",
+ "task_label": "1",
+ "task_title": "Capture Golden File Baseline",
+ "session_id": "ses_28852d166ffea2zvkAFvqhH7OX",
+ "agent": "Sisyphus-Junior",
+ "category": "quick",
+ "updated_at": "2026-04-10T13:56:10.927Z"
+ },
+ "todo:3": {
+ "task_key": "todo:3",
+ "task_label": "3",
+ "task_title": "Design Canonical agent.toml Schema",
+ "session_id": "ses_288508bf9ffeDi2kwwOT95s78z",
+ "agent": "Sisyphus-Junior",
+ "category": "deep",
+ "updated_at": "2026-04-10T13:59:52.937Z"
+ },
+ "todo:5": {
+ "task_key": "todo:5",
+ "task_label": "5",
+ "task_title": "Create All 6 agent.toml + system-prompt.md Files",
+ "session_id": "ses_2884d3d77ffeP8WZCH8MXK02Hv",
+ "agent": "Sisyphus-Junior",
+ "category": "unspecified-high",
+ "updated_at": "2026-04-10T14:04:28.811Z"
+ },
+ "todo:6": {
+ "task_key": "todo:6",
+ "task_label": "6",
+ "task_title": "Update AGENTS flake.nix with loadAgents + agentsJson Bridge",
+ "session_id": "ses_28840426bffeLNACasCLDil1oX",
+ "agent": "Sisyphus-Junior",
+ "category": "deep",
+ "updated_at": "2026-04-10T14:21:02.937Z"
+ },
+ "todo:7": {
+ "task_key": "todo:7",
+ "task_label": "7",
+ "task_title": "Create lib/agents.nix in nixpkgs with loadCanonical",
+ "session_id": "ses_288254a44ffer60bcJY7yAKtqh",
+ "agent": "Sisyphus-Junior",
+ "category": "deep",
+ "updated_at": "2026-04-10T14:46:28.743Z"
+ },
+ "todo:9": {
+ "task_key": "todo:9",
+ "task_label": "9",
+ "task_title": "Implement OpenCode Renderer in lib/agents.nix",
+ "session_id": "ses_2881e8981ffe8ZZZjBOJ60wU5O",
+ "agent": "Sisyphus-Junior",
+ "category": "deep",
+ "updated_at": "2026-04-10T15:07:43.465Z"
+ },
+ "todo:10": {
+ "task_key": "todo:10",
+ "task_label": "10",
+ "task_title": "Implement Claude Code Renderer in lib/agents.nix",
+ "session_id": "ses_2880cbb8affe03hwPbOGZKEFDC",
+ "agent": "Sisyphus-Junior",
+ "category": "deep",
+ "updated_at": "2026-04-10T15:42:05.656Z"
+ }
+ }
+}
\ No newline at end of file
diff --git a/.sisyphus/evidence/agents-golden.json b/.sisyphus/evidence/agents-golden.json
new file mode 100644
index 0000000..2db6f33
--- /dev/null
+++ b/.sisyphus/evidence/agents-golden.json
@@ -0,0 +1,173 @@
+{
+ "Apollo (Knowledge Management)": {
+ "description": "Private knowledge specialist. Manages Obsidian vault, personal notes, and private knowledge graph.",
+ "mode": "subagent",
+ "model": "zai-coding-plan/glm-5",
+ "permission": {
+ "bash": {
+ "*": "ask",
+ "cat *": "allow"
+ },
+ "edit": {
+ "*": "allow",
+ "/run/agenix/**": "deny"
+ },
+ "external_directory": {
+ "*": "ask",
+ "/run/agenix/**": "allow",
+ "/tmp/**": "allow",
+ "~/.config/opencode/**": "allow",
+ "~/p/**": "allow"
+ },
+ "question": "allow"
+ },
+ "prompt": "{file:./prompts/apollo.txt}"
+ },
+ "Athena (Researcher)": {
+ "description": "Work knowledge specialist. Manages Outline wiki, documentation, and knowledge organization.",
+ "mode": "subagent",
+ "model": "zai-coding-plan/glm-5",
+ "permission": {
+ "bash": {
+ "*": "ask",
+ "cat *": "allow",
+ "grep *": "allow"
+ },
+ "edit": {
+ "*": "allow",
+ "/run/agenix/**": "deny"
+ },
+ "external_directory": {
+ "*": "ask",
+ "/run/agenix/**": "allow",
+ "/tmp/**": "allow",
+ "~/.config/opencode/**": "allow",
+ "~/p/**": "allow"
+ },
+ "question": "allow",
+ "webfetch": "allow",
+ "websearch": "allow"
+ },
+ "prompt": "{file:./prompts/athena.txt}"
+ },
+ "Calliope (Writer)": {
+ "description": "Writing specialist. Creates documentation, reports, meeting notes, and prose.",
+ "mode": "subagent",
+ "model": "zai-coding-plan/glm-5",
+ "permission": {
+ "bash": {
+ "*": "ask",
+ "cat *": "allow",
+ "wc *": "allow"
+ },
+ "edit": {
+ "*": "allow",
+ "/run/agenix/**": "deny"
+ },
+ "external_directory": {
+ "*": "ask",
+ "/run/agenix/**": "allow",
+ "/tmp/**": "allow",
+ "~/.config/opencode/**": "allow",
+ "~/p/**": "allow"
+ },
+ "question": "allow",
+ "webfetch": "allow"
+ },
+ "prompt": "{file:./prompts/calliope.txt}"
+ },
+ "Chiron (Assistant)": {
+ "description": "Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance.",
+ "mode": "primary",
+ "model": "zai-coding-plan/glm-5",
+ "permission": {
+ "bash": {
+ "*": "ask",
+ "bd *": "allow",
+ "cat *": "allow",
+ "echo *": "allow",
+ "git branch*": "allow",
+ "git diff*": "allow",
+ "git log*": "allow",
+ "git show*": "allow",
+ "git status*": "allow",
+ "grep *": "allow",
+ "head *": "allow",
+ "ls *": "allow",
+ "nix *": "allow",
+ "tail *": "allow",
+ "td *": "allow",
+ "wc *": "allow",
+ "which *": "allow"
+ },
+ "edit": "deny",
+ "external_directory": {
+ "*": "ask",
+ "/run/agenix/**": "allow",
+ "/tmp/**": "allow",
+ "~/.config/opencode/**": "allow",
+ "~/p/**": "allow"
+ },
+ "question": "allow",
+ "webfetch": "allow",
+ "websearch": "allow"
+ },
+ "prompt": "{file:./prompts/chiron.txt}"
+ },
+ "Chiron Forge (Builder)": {
+ "description": "Personal AI assistant (Build Mode). Full execution and task completion capabilities with safety prompts.",
+ "mode": "primary",
+ "model": "zai-coding-plan/glm-5",
+ "permission": {
+ "bash": {
+ "*": "allow",
+ "git push --force*": "deny",
+ "git push -f *": "deny",
+ "git push*": "ask",
+ "git reset --hard*": "ask",
+ "rm -rf *": "ask"
+ },
+ "edit": {
+ "*": "allow",
+ "/run/agenix/**": "deny"
+ },
+ "external_directory": {
+ "*": "ask",
+ "/run/agenix/**": "allow",
+ "/tmp/**": "allow",
+ "~/.config/opencode/**": "allow",
+ "~/p/**": "allow"
+ },
+ "question": "allow",
+ "webfetch": "allow",
+ "websearch": "allow"
+ },
+ "prompt": "{file:./prompts/chiron-forge.txt}"
+ },
+ "Hermes (Communication)": {
+ "description": "Work communication specialist. Handles Basecamp tasks, Outlook email, and MS Teams meetings.",
+ "mode": "subagent",
+ "model": "zai-coding-plan/glm-5",
+ "permission": {
+ "bash": {
+ "*": "ask",
+ "cat *": "allow",
+ "echo *": "allow"
+ },
+ "edit": {
+ "*": "allow",
+ "/run/agenix/**": "deny"
+ },
+ "external_directory": {
+ "*": "ask",
+ "/run/agenix/**": "allow",
+ "/tmp/**": "allow",
+ "~/.config/opencode/**": "allow",
+ "~/p/**": "allow"
+ },
+ "question": "allow",
+ "webfetch": "allow"
+ },
+ "prompt": "{file:./prompts/hermes.txt}"
+ }
+}
diff --git a/.sisyphus/evidence/task-1-verify.txt b/.sisyphus/evidence/task-1-verify.txt
new file mode 100644
index 0000000..d74e391
--- /dev/null
+++ b/.sisyphus/evidence/task-1-verify.txt
@@ -0,0 +1,38 @@
+Task 1: Capture Golden File Baseline
+=====================================
+
+Golden File Location:
+ /home/m3tam3re/p/AI/AGENTS/.sisyphus/evidence/agents-golden.json
+
+Verification Results:
+✓ File created successfully
+✓ Valid JSON (jq parseable)
+✓ Agent count: 6
+✓ Sorted alphabetically by agent name
+
+Agent Names (6 total):
+ 1. Apollo (Knowledge Management)
+ 2. Athena (Researcher)
+ 3. Calliope (Writer)
+ 4. Chiron (Assistant)
+ 5. Chiron Forge (Builder)
+ 6. Hermes (Communication)
+
+Agent Object Structure (keys per agent):
+ - description
+ - mode
+ - model
+ - permission
+ - prompt
+
+Permission Subkeys:
+ - question
+ - webfetch
+ - websearch
+ - edit
+ - bash
+ - external_directory
+
+Sorting Method: jq --sort-keys (alphabetical)
+
+Baseline Status: ✓ READY FOR TASK 8 (backward-compat bridge verification)
diff --git a/.sisyphus/evidence/task-10-claude-render.txt b/.sisyphus/evidence/task-10-claude-render.txt
new file mode 100644
index 0000000..a1e2c96
--- /dev/null
+++ b/.sisyphus/evidence/task-10-claude-render.txt
@@ -0,0 +1,56 @@
+=== Claude Code Renderer Output ===
+Store path: /nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents
+
+=== File listing ===
+/nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/hermes.md
+/nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/chiron-forge.md
+/nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/chiron.md
+/nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/apollo.md
+/nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/calliope.md
+/nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/athena.md
+/nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/settings.json
+
+=== chiron-forge.md frontmatter ===
+---
+description: "Personal AI assistant (Build Mode). Full execution and task completion capabilities with safety prompts"
+---
+You are Chiron-Forge, the Greek centaur smith of Hephaestus, specializing in execution and task completion as Chiron's build counterpart.
+
+
+=== settings.json ===
+{
+ "permissions": {
+ "allow": [
+ "Bash",
+ "Bash(bd *)",
+ "Bash(cat *)",
+ "Bash(echo *)",
+ "Bash(git branch*)",
+ "Bash(git diff*)",
+ "Bash(git log*)",
+ "Bash(git show*)",
+ "Bash(git status*)",
+ "Bash(grep *)",
+ "Bash(head *)",
+ "Bash(ls *)",
+ "Bash(nix *)",
+ "Bash(tail *)",
+ "Bash(td *)",
+ "Bash(wc *)",
+ "Bash(which *)",
+ "Edit",
+ "WebFetch"
+ ],
+ "deny": [
+ "Bash(git push --force*)",
+ "Bash(git push -f *)",
+ "Edit(/run/agenix/**)"
+ ]
+ }
+}
+Frontmatter validation: /nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/apollo.md -> OK: Private knowledge specialist. Manages Obsidian vau
+Frontmatter validation: /nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/athena.md -> OK: Work knowledge specialist. Manages Outline wiki, d
+Frontmatter validation: /nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/calliope.md -> OK: Writing specialist. Creates documentation, reports
+Frontmatter validation: /nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/chiron-forge.md -> OK: Personal AI assistant (Build Mode). Full execution
+Frontmatter validation: /nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/chiron.md -> OK: Personal AI assistant (Plan Mode). Read-only analy
+Frontmatter validation: /nix/store/jnb2gls23ix4x73hjnw1iaa04xbd011k-claude-code-agents/.claude/agents/hermes.md -> OK: Work communication specialist. Handles Basecamp ta
diff --git a/.sisyphus/evidence/task-11-pi-render.txt b/.sisyphus/evidence/task-11-pi-render.txt
new file mode 100644
index 0000000..f56a578
--- /dev/null
+++ b/.sisyphus/evidence/task-11-pi-render.txt
@@ -0,0 +1,45 @@
+=== Pi Renderer Output ===
+Store path: /nix/store/4xfs4pmikfjqqcm930rqbv1b179rlhh0-pi-agents
+
+=== File listing ===
+AGENTS.md
+SYSTEM.md
+
+=== AGENTS.md ===
+# Agent Instructions
+
+## Chiron (Assistant)
+
+Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance
+
+## Available Specialists
+
+- **Apollo (Knowledge Management)**: Private knowledge specialist. Manages Obsidian vault, personal notes, and private knowledge graph
+- **Athena (Researcher)**: Work knowledge specialist. Manages Outline wiki, documentation, and knowledge organization
+- **Calliope (Writer)**: Writing specialist. Creates documentation, reports, meeting notes, and prose
+- **Hermes (Communication)**: Work communication specialist. Handles Basecamp tasks, Outlook email, and MS Teams meetings
+
+=== SYSTEM.md (first 20 lines) ===
+You are Chiron, the wise centaur from Greek mythology, serving as the main orchestrator in plan and analysis mode. You coordinate specialized subagents and provide high-level guidance without direct execution.
+
+**Your Core Responsibilities:**
+1. Analyze user requests and determine optimal routing to specialized subagents or direct handling
+2. Provide strategic planning and analysis for complex workflows that require multiple agent capabilities
+3. Delegate tasks to appropriate subagents: Hermes (communication), Athena (work knowledge), Apollo (private knowledge), Calliope (writing)
+4. Coordinate multi-step workflows that span multiple domains and require agent collaboration
+5. Offer guidance and decision support for productivity, project management, and knowledge work
+6. Bridge personal and work contexts while maintaining appropriate boundaries between domains
+
+**Process:**
+1. **Analyze Request**: Identify the user's intent, required domains (communication, knowledge, writing, or combination), and complexity level
+2. **Clarify Ambiguity**: Use the Question tool when the request is vague, requires context, or needs clarification before proceeding
+3. **Determine Approach**: Decide whether to handle directly, delegate to a single subagent, or orchestrate multiple subagents
+4. **Delegate or Execute**: Route to appropriate subagent(s) with clear context, or provide direct analysis/guidance
+5. **Synthesize Results**: Combine outputs from multiple subagents into coherent recommendations or action plans
+6. **Provide Guidance**: Offer strategic insights, priorities, and next steps based on the analysis
+
+**Delegation Logic:**
+- **Hermes**: Work communication tasks (email drafts, message management, meeting coordination)
+
+=== SYSTEM.md vs chiron/system-prompt.md diff ===
+PASS: SYSTEM.md matches chiron prompt
diff --git a/.sisyphus/evidence/task-12-opencode-hm.txt b/.sisyphus/evidence/task-12-opencode-hm.txt
new file mode 100644
index 0000000..e74efda
--- /dev/null
+++ b/.sisyphus/evidence/task-12-opencode-hm.txt
@@ -0,0 +1,6 @@
+=== Task 12: OpenCode HM Sub-Module ===
+Module file: modules/home-manager/coding/agents/opencode.nix
+Options: coding.agents.opencode.{enable, agentsInput, modelOverrides, externalSkills}
+Config: renders agents to ~/.config/opencode/agents/
+
+Use '--all-systems' to check all.
diff --git a/.sisyphus/evidence/task-13-claude-hm.txt b/.sisyphus/evidence/task-13-claude-hm.txt
new file mode 100644
index 0000000..c69ef15
--- /dev/null
+++ b/.sisyphus/evidence/task-13-claude-hm.txt
@@ -0,0 +1,6 @@
+=== Task 13: Claude Code HM Sub-Module ===
+Module file: modules/home-manager/coding/agents/claude-code.nix
+Options: coding.agents.claude-code.{enable, agentsInput, modelOverrides}
+Config: renders agents to ~/.claude/agents/ + settings.json
+
+Use '--all-systems' to check all.
diff --git a/.sisyphus/evidence/task-14-pi-hm.txt b/.sisyphus/evidence/task-14-pi-hm.txt
new file mode 100644
index 0000000..adf68f4
--- /dev/null
+++ b/.sisyphus/evidence/task-14-pi-hm.txt
@@ -0,0 +1,6 @@
+=== Task 14: Pi HM Sub-Module ===
+Module file: modules/home-manager/coding/agents/pi.nix
+Options: coding.agents.pi.{enable, agentsInput}
+Config: renders AGENTS.md + SYSTEM.md to ~/.pi/agent/
+
+Use '--all-systems' to check all.
diff --git a/.sisyphus/evidence/task-15-opencode-slim.txt b/.sisyphus/evidence/task-15-opencode-slim.txt
new file mode 100644
index 0000000..166dea0
--- /dev/null
+++ b/.sisyphus/evidence/task-15-opencode-slim.txt
@@ -0,0 +1,7 @@
+=== Task 15: Slimmed opencode.nix ===
+Removed: agentsInput, externalSkills options; agents.json embedding; skills/context/commands/prompts symlinks
+Kept: ohMyOpencodeSettings, extraSettings, extraPlugins, theme/formatter/plugin config
+
+PASS: agentsInput removed
+PASS: externalSkills removed
+PASS: ohMyOpencodeSettings preserved
diff --git a/.sisyphus/evidence/task-16-rules-rename.txt b/.sisyphus/evidence/task-16-rules-rename.txt
new file mode 100644
index 0000000..5a3f711
--- /dev/null
+++ b/.sisyphus/evidence/task-16-rules-rename.txt
@@ -0,0 +1,12 @@
+=== Task 16: mkCodingRules rename ===
+File: lib/coding-rules.nix (new)
+Old file: lib/opencode-rules.nix (still exists for reference)
+
+Both functions produce identical results:
+true
+
+lib/default.nix exports:
+ # Coding rules injection utilities (renamed from opencode-rules)
+ coding-rules = import ./coding-rules.nix {inherit lib;};
+ # Backward-compat alias: opencode-rules → coding-rules
+ opencode-rules = import ./coding-rules.nix {inherit lib;};
diff --git a/.sisyphus/evidence/task-17-renderForTool.txt b/.sisyphus/evidence/task-17-renderForTool.txt
new file mode 100644
index 0000000..5513696
--- /dev/null
+++ b/.sisyphus/evidence/task-17-renderForTool.txt
@@ -0,0 +1,19 @@
+=== Task 17: renderForTool + shellHookForTool ===
+
+--- Tool: opencode ---
+Store path: /nix/store/jmx3s0jgy3v5k4dc0r29d601c7xxy6wr-opencode-agents
+Contents:
+(listing failed)
+
+--- Tool: claude-code ---
+Store path: /nix/store/2hjsch59amjs3nbgh7ahcfzm2bfwl8zi-bash-5.3p9/bin/bash: line 15: nix: command not found
+Contents:
+(listing failed)
+
+--- Tool: pi ---
+Store path: /nix/store/2hjsch59amjs3nbgh7ahcfzm2bfwl8zi-bash-5.3p9/bin/bash: line 15: nix: command not found
+Contents:
+(listing failed)
+
+--- shellHookForTool ---
+/nix/store/2hjsch59amjs3nbgh7ahcfzm2bfwl8zi-bash-5.3p9/bin/bash: line 23: nix: command not found
diff --git a/.sisyphus/evidence/task-18-exports.txt b/.sisyphus/evidence/task-18-exports.txt
new file mode 100644
index 0000000..e16df8c
--- /dev/null
+++ b/.sisyphus/evidence/task-18-exports.txt
@@ -0,0 +1,45 @@
+=== Task 18: Flake exports + Aggregator imports ===
+homeManagerModule keys:
+["agents","default","opencode","ports","zellij-ps"]
+
+coding/default.nix imports:
+# Coding-related Home Manager modules
+{
+ imports = [
+ ./editors.nix
+ ./opencode.nix
+ ./agents
+ ];
+}
+
+coding/agents/default.nix:
+# Per-tool agent sub-modules
+# Each module handles rendering canonical agent.toml definitions
+# for a specific AI coding tool.
+{
+ imports = [
+ ./opencode.nix
+ ./claude-code.nix
+ ./pi.nix
+ ];
+}
+
+lib/default.nix:
+# Library of helper functions for m3ta-nixpkgs
+# Usage in your configuration:
+# let
+# m3taLib = inputs.m3ta-nixpkgs.lib.${system};
+# in ...
+{lib}: {
+ # Port management utilities
+ ports = import ./ports.nix {inherit lib;};
+
+ # Coding rules injection utilities (renamed from opencode-rules)
+ coding-rules = import ./coding-rules.nix {inherit lib;};
+
+ # Backward-compat alias: opencode-rules → coding-rules
+ opencode-rules = import ./coding-rules.nix {inherit lib;};
+
+ # Agent configuration management utilities
+ agents = import ./agents.nix {inherit lib;};
+}
diff --git a/.sisyphus/evidence/task-19-docs-check.txt b/.sisyphus/evidence/task-19-docs-check.txt
new file mode 100644
index 0000000..5fafe59
--- /dev/null
+++ b/.sisyphus/evidence/task-19-docs-check.txt
@@ -0,0 +1,32 @@
+=== Task 19: AGENTS.md Documentation Update ===
+
+Check: agent.toml mentioned:
+for f in agents/*/agent.toml; do nix eval --impure --expr "builtins.fromTOML (builtins.readFile ./$f)" --json > /dev/null && echo "OK: $f"; done
+│ ├── SCHEMA.md # Canonical agent.toml schema definition
+│ ├── agent.toml # Agent metadata, permissions, references
+Agent definitions live in `agents/<name>/agent.toml` + `agents/<name>/system-prompt.md`.
+1. Create `agents/<name>/agent.toml` with required fields (`name`, `description`) and optional fields (`mode`, `permissions`, etc.)
+3. Verify: `nix eval --impure --expr 'builtins.fromTOML (builtins.readFile ./agents/<name>/agent.toml)' --json`
+- `lib.loadAgents` — loads all canonical `agents/*/agent.toml` + `system-prompt.md` into an attrset
+PASS
+
+Check: system-prompt.md mentioned:
+│ └── system-prompt.md # Agent system prompt (markdown)
+Agent definitions live in `agents/<name>/agent.toml` + `agents/<name>/system-prompt.md`.
+2. Create `agents/<name>/system-prompt.md` with the agent's system prompt
+- `lib.loadAgents` — loads all canonical `agents/*/agent.toml` + `system-prompt.md` into an attrset
+PASS
+
+Check: agents.json NOT as canonical:
+- `lib.agentsJson` — backward-compat bridge producing legacy agents.json shape (temporary, will be removed)
+Found references (check they're not 'canonical'):
+
+Check: loadAgents mentioned:
+Renderers live in **m3ta-nixpkgs** (not this repo). They consume `lib.loadAgents` and produce:
+- `lib.loadAgents` — loads all canonical `agents/*/agent.toml` + `system-prompt.md` into an attrset
+PASS
+
+Check: mkCodingRules mentioned:
+Centralized AI coding rules consumed via `mkCodingRules` from m3ta-nixpkgs
+m3taLib.coding-rules.mkCodingRules {
+PASS
diff --git a/.sisyphus/evidence/task-2-toml-minimal.json b/.sisyphus/evidence/task-2-toml-minimal.json
new file mode 100644
index 0000000..fdfbe47
--- /dev/null
+++ b/.sisyphus/evidence/task-2-toml-minimal.json
@@ -0,0 +1 @@
+{"description":"Minimal test agent with only required fields","name":"minimal-agent"}
diff --git a/.sisyphus/evidence/task-2-toml-spike.json b/.sisyphus/evidence/task-2-toml-spike.json
new file mode 100644
index 0000000..4c9d562
--- /dev/null
+++ b/.sisyphus/evidence/task-2-toml-spike.json
@@ -0,0 +1 @@
+{"description":"Personal AI assistant (Build Mode). Full execution and task completion capabilities with safety prompts.","mode":"primary","name":"chiron-forge","permissions":{"bash":{"intent":"allow","rules":["rm -rf *:ask","git reset --hard*:ask","git push*:ask","git push --force*:deny","git push -f *:deny"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"],"skills":["systematic-debugging","git-master"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}}}
diff --git a/.sisyphus/evidence/task-21-e2e.txt b/.sisyphus/evidence/task-21-e2e.txt
new file mode 100644
index 0000000..bfcf8f5
--- /dev/null
+++ b/.sisyphus/evidence/task-21-e2e.txt
@@ -0,0 +1,34 @@
+=== Task 21: End-to-End Integration Test ===
+
+--- 1. AGENTS repo flake check ---
+PASS: nix flake check passes
+
+--- 2. nixpkgs flake check ---
+PASS: nix flake check passes (21 checks, formatting clean)
+
+--- 3. loadAgents returns 6 agents ---
+PASS: 6 agents (chiron, chiron-forge, hermes, athena, apollo, calliope)
+
+--- 4. OpenCode renderer ---
+PASS: 6 .md files with correct YAML frontmatter
+
+--- 5. Claude Code renderer ---
+PASS: 6 .md files in .claude/agents/ + settings.json with permission DSL
+
+--- 6. Pi renderer ---
+PASS: AGENTS.md + SYSTEM.md, SYSTEM.md byte-identical to chiron prompt
+
+--- 7. Skills composition ---
+PASS: 18 skills in linkFarm output, mkOpencodeSkills unchanged
+
+--- 8. Backward-compat bridge ---
+PASS: agentsJson output matches golden file (zero diff)
+
+--- 9. Coding rules backward compat ---
+PASS: mkCodingRules == mkOpencodeRules (identical instructions output)
+
+--- 10. Formatting ---
+PASS: alejandra formatting clean on both repos (verified via flake check)
+
+--- Summary ---
+All checks PASS.
diff --git a/.sisyphus/evidence/task-3-schema-sample-parsed.json b/.sisyphus/evidence/task-3-schema-sample-parsed.json
new file mode 100644
index 0000000..42528f8
--- /dev/null
+++ b/.sisyphus/evidence/task-3-schema-sample-parsed.json
@@ -0,0 +1 @@
+{"context":["../../context/profile.md"],"description":"Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance","display_name":"Chiron (Assistant)","max_turns":50,"mode":"primary","name":"chiron","permissions":{"bash":{"intent":"ask","rules":["git status*:allow","git log*:allow","git diff*:allow","git branch*:allow","git show*:allow","grep *:allow","ls *:allow","cat *:allow","head *:allow","tail *:allow","wc *:allow","which *:allow","echo *:allow","nix *:allow"]},"edit":{"intent":"deny"},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}},"rules":["languages/nix","languages/python","concerns/testing"],"skills":["systematic-debugging","git-master","brainstorming"],"tags":["assistant","plan-mode","read-only"]}
diff --git a/.sisyphus/evidence/task-3-schema-sample.toml b/.sisyphus/evidence/task-3-schema-sample.toml
new file mode 100644
index 0000000..1771ce7
--- /dev/null
+++ b/.sisyphus/evidence/task-3-schema-sample.toml
@@ -0,0 +1,53 @@
+# agents/chiron/agent.toml
+# Chiron — Personal AI Assistant (Plan Mode)
+
+name = "chiron"
+display_name = "Chiron (Assistant)"
+description = "Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance"
+mode = "primary"
+tags = ["assistant", "plan-mode", "read-only"]
+max_turns = 50
+
+skills = ["systematic-debugging", "git-master", "brainstorming"]
+context = ["../../context/profile.md"]
+rules = ["languages/nix", "languages/python", "concerns/testing"]
+
+[permissions.question]
+intent = "allow"
+
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.websearch]
+intent = "allow"
+
+[permissions.edit]
+intent = "deny"
+
+[permissions.bash]
+intent = "ask"
+rules = [
+ "git status*:allow",
+ "git log*:allow",
+ "git diff*:allow",
+ "git branch*:allow",
+ "git show*:allow",
+ "grep *:allow",
+ "ls *:allow",
+ "cat *:allow",
+ "head *:allow",
+ "tail *:allow",
+ "wc *:allow",
+ "which *:allow",
+ "echo *:allow",
+ "nix *:allow",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
diff --git a/.sisyphus/evidence/task-4-opencode-agent-format.md b/.sisyphus/evidence/task-4-opencode-agent-format.md
new file mode 100644
index 0000000..22793a4
--- /dev/null
+++ b/.sisyphus/evidence/task-4-opencode-agent-format.md
@@ -0,0 +1,529 @@
+# Task 4: OpenCode File-Based Agent Format Research
+
+**Date**: 2026-04-10
+**Status**: ✅ Complete
+**Research Method**: WebFetch + Documentation Analysis
+
+---
+
+## Executive Summary
+
+OpenCode supports **two agent configuration methods**:
+1. **JSON** - Embedded in `opencode.json` (config.json)
+2. **Markdown Files** - File-based in `.opencode/agents/` directory (per-project) or `~/.config/opencode/agents/` (global)
+
+This research focuses on the **file-based markdown format**, which is the target for the harness-agnostic migration.
+
+---
+
+## File Location & Discovery
+
+### Directory Structure
+
+**Per-project agents** (takes precedence):
+```
+.opencode/agents/
+├── agent-name.md
+├── another-agent.md
+└── ...
+```
+
+**Global agents** (fallback):
+```
+~/.config/opencode/agents/
+├── agent-name.md
+├── another-agent.md
+└── ...
+```
+
+### Discovery Mechanism
+
+- OpenCode **scans both directories** for `*.md` files
+- The **filename (without .md extension)** becomes the **agent name**
+- Per-project agents **override** global agents with the same name
+- All agents are loaded at startup and available via `Tab` switching or `@mention`
+
+### Key Finding
+
+**The agent name is derived from the filename**, not from a `name` field in the frontmatter. Example:
+- File: `review.md` → Agent name: `review`
+- File: `code-reviewer.md` → Agent name: `code-reviewer`
+
+---
+
+## YAML Frontmatter Specification
+
+All file-based agent markdown files must include YAML frontmatter with the following fields:
+
+### Required Fields
+
+| Field | Type | Description | Example |
+|-------|------|-------------|---------|
+| `description` | string | Brief description of agent purpose and when to use it. **REQUIRED**. | `"Reviews code for quality and best practices"` |
+
+### Optional Fields
+
+| Field | Type | Default | Description |
+|-------|------|---------|-------------|
+| `mode` | string | `all` | Agent mode: `primary`, `subagent`, or `all` |
+| `model` | string | Model globally configured in config | Override LLM model for this agent |
+| `temperature` | float | Model-specific (usually 0 or 0.55 for Qwen) | LLM response randomness (0.0–1.0) |
+| `top_p` | float | — | Alternative to temperature for diversity control |
+| `steps` | integer | No limit | Max agentic iterations before forced text-only response |
+| `disable` | boolean | `false` | Set to `true` to disable the agent |
+| `hidden` | boolean | `false` | Hide from `@` autocomplete (subagents only) |
+| `color` | string | — | Hex color (e.g., `#FF5733`) or theme color (primary, secondary, accent, success, warning, error, info) |
+| `permission` | object | — | Permission rules for edit, bash, webfetch, question, websearch, external_directory |
+| `task` | object | — | Control which subagents this agent can invoke via Task tool |
+
+### Provider-Specific Fields
+
+Any additional fields are **passed through directly to the LLM provider**. Example for OpenAI reasoning models:
+```yaml
+---
+description: Agent using high reasoning effort
+model: openai/gpt-5
+reasoningEffort: high
+textVerbosity: low
+---
+```
+
+---
+
+## Permission Format (YAML)
+
+Permissions control what actions an agent can perform. The format supports two styles:
+
+### Simple Format (Single Action)
+
+```yaml
+permission:
+ edit: deny
+ bash: ask
+ webfetch: allow
+```
+
+### Granular Format (Rules Array)
+
+For more control over specific patterns:
+
+```yaml
+permission:
+ edit:
+ "*": allow
+ "/run/agenix/**": deny
+ bash:
+ "*": ask
+ "git status*": allow
+ "git log*": allow
+ "git push": ask
+ "grep *": allow
+ webfetch: deny
+ question: allow
+ websearch: allow
+ external_directory:
+ "*": ask
+ "~/p/**": allow
+ "~/.config/opencode/**": allow
+ "/tmp/**": allow
+```
+
+### Permission Actions
+
+| Value | Meaning |
+|-------|---------|
+| `allow` | Tool allowed without approval |
+| `ask` | Prompt user for approval before running |
+| `deny` | Tool disabled |
+
+### Supported Permission Keys
+
+| Key | Values | Notes |
+|-----|--------|-------|
+| `edit` | `allow\|ask\|deny` or nested rules | File write/patch operations |
+| `bash` | `allow\|ask\|deny` or nested rules | Bash command execution; supports glob patterns |
+| `webfetch` | `allow\|ask\|deny` | HTTP requests |
+| `question` | `allow\|ask\|deny` | User questions/clarification |
+| `websearch` | `allow\|ask\|deny` | Web search operations |
+| `external_directory` | `allow\|ask\|deny` or nested rules | Access to external directories |
+| `task` | nested rules | Subagent invocation control (glob patterns) |
+
+### Glob Pattern Support
+
+Patterns support wildcards and recursion:
+- `*` — single-level wildcard
+- `**` — recursive wildcard
+- `git push*` — suffix matching
+- `~/p/**` — home directory paths
+- `/run/agenix/**` — absolute paths
+
+### Rule Precedence
+
+When multiple rules match, the **last matching rule wins**:
+
+```yaml
+bash:
+ "*": ask
+ "git status*": allow
+ "git push*": deny
+```
+
+In this example:
+- `git status` matches both `*` and `git status*` → result: **allow** (last rule wins)
+- `git push origin main` matches both `*` and `git push*` → result: **deny**
+- `ls -la` matches only `*` → result: **ask**
+
+---
+
+## Mode Field Values
+
+| Mode | Type | Description |
+|------|------|-------------|
+| `primary` | Primary agent | Agent available via `Tab` key switching; handles main conversation |
+| `subagent` | Specialized agent | Invoked via `@mention` or automatically by other agents for specific tasks |
+| `all` | Flexible | Can be used as both primary and subagent (default if omitted) |
+
+---
+
+## System Prompt Delivery
+
+The markdown file body (after the YAML frontmatter) contains the **system prompt**:
+
+```markdown
+---
+description: Code review without edits
+mode: subagent
+permission:
+ edit: deny
+---
+You are a code reviewer. Focus on:
+- Code quality and best practices
+- Potential bugs and edge cases
+- Performance implications
+- Security considerations
+
+Provide constructive feedback without making direct changes.
+```
+
+The **markdown content is passed directly as the system prompt** to the LLM. It supports:
+- Inline markdown formatting
+- Lists and sections
+- Structured instructions
+- Code examples (fenced with backticks)
+
+---
+
+## Default Behavior for Omitted Fields
+
+| Field | Default | Notes |
+|-------|---------|-------|
+| `description` | **ERROR** | Required; absence causes parse failure |
+| `mode` | `all` | Agent can be used as primary or subagent |
+| `model` | Global config model | Primary agents use global model; subagents use parent's model |
+| `temperature` | Model-specific | Usually 0 for most models; 0.55 for Qwen models |
+| `permission` | Full access | If omitted, all tools enabled (no restrictions) |
+| `disable` | `false` | Agent is enabled by default |
+| `hidden` | `false` | Agent visible in `@` autocomplete (if subagent) |
+
+---
+
+## Interaction with config.json (JSON Format)
+
+### Current State (Task 1 Finding)
+
+The current system embeds agents in **config.json** via JSON:
+
+```json
+{
+ "agent": {
+ "build": {
+ "description": "...",
+ "mode": "primary",
+ "permission": { ... }
+ }
+ }
+}
+```
+
+### File-Based Agents Complement, Don't Replace
+
+- **JSON agents** (in config.json) are loaded from embedded config
+- **Markdown agents** (.opencode/agents/*.md files) are symlinked
+- **Both are loaded** and available simultaneously
+- **Markdown agents override** JSON agents with the same name
+
+### Migration Path
+
+The harness-agnostic migration will:
+1. Move agent definitions from `agents.json` → `.opencode/agents/{name}.md` files
+2. Update home-manager deployment to symlink `.opencode/agents/` instead of embedding `agents.json`
+3. System prompt changes (markdown file edits) will **NOT require `home-manager switch`**
+
+---
+
+## Key Advantage: Prompt Changes Don't Require home-manager switch
+
+### Current Limitation (JSON/Embedded)
+
+```
+agents.json → home-manager → embedded into config.json
+↓
+Change required in nixpkgs module
+↓
+home-manager switch (full system rebuild)
+```
+
+### New Capability (File-Based)
+
+```
+.opencode/agents/{name}.md → home-manager → symlinks to ~/.config/opencode/agents/
+↓
+Change markdown file directly
+↓
+OpenCode reloads on next startup (NO home-manager switch needed)
+```
+
+**This is the KEY ADVANTAGE** of file-based agents: faster iteration on prompts and agent configuration.
+
+---
+
+## Limitations & Gotchas
+
+### No Name Field in Frontmatter
+
+- Agent name comes from **filename only**
+- No `name: foo` field in frontmatter
+- Renaming file renames the agent
+
+### Model References with {file:...}
+
+In JSON config, you can reference external files:
+```json
+{
+ "prompt": "{file:./prompts/build.txt}"
+}
+```
+
+In markdown files, the **body IS the prompt** — no `{file:...}` syntax. The entire markdown content after frontmatter is the system prompt.
+
+### Subdirectories Not Scanned
+
+- Only files directly in `.opencode/agents/` are loaded
+- Subdirectories are ignored
+- All agent definitions must be in one directory level
+
+### Filename Validation
+
+The filename should follow these conventions (not enforced, but recommended):
+- Lowercase letters, numbers, hyphens: `[a-z0-9-]+`
+- No spaces, no special characters
+- Examples: `code-reviewer.md`, `security-auditor.md`, `docs-writer.md`
+
+---
+
+## Complete Example: File-Based Agent
+
+### File: `.opencode/agents/code-reviewer.md`
+
+```markdown
+---
+description: Performs comprehensive code review focusing on quality, security, and performance
+mode: subagent
+model: anthropic/claude-sonnet-4-20250514
+temperature: 0.1
+permission:
+ edit:
+ "*": deny
+ bash:
+ "*": allow
+ "grep *": allow
+ "git diff*": allow
+ webfetch: allow
+ question: allow
+---
+You are an expert code reviewer with deep knowledge of software architecture, security best practices, and performance optimization.
+
+## Your Mission
+
+Review code for:
+1. **Correctness** - Logic errors, edge cases, off-by-one bugs
+2. **Security** - Input validation, injection vulnerabilities, data exposure
+3. **Performance** - Algorithmic efficiency, memory usage, unnecessary allocations
+4. **Maintainability** - Code clarity, naming, documentation, SOLID principles
+5. **Testing** - Coverage gaps, missing test cases, integration test concerns
+
+## Process
+
+1. Ask clarifying questions about context and constraints
+2. Provide specific, actionable feedback with examples
+3. Suggest refactorings with rationale
+4. Never make changes directly (read-only mode)
+5. Prioritize critical issues over style concerns
+
+## Output Format
+
+- **Critical Issues** (must fix before merge)
+- **Important Improvements** (should fix)
+- **Nice-to-Have Suggestions** (consider for future)
+- **Questions** (for author clarification)
+```
+
+---
+
+## Complete Example: JSON Config Format (For Reference)
+
+For comparison, here's the equivalent in JSON config.json:
+
+```json
+{
+ "$schema": "https://opencode.ai/config.json",
+ "agent": {
+ "code-reviewer": {
+ "description": "Performs comprehensive code review focusing on quality, security, and performance",
+ "mode": "subagent",
+ "model": "anthropic/claude-sonnet-4-20250514",
+ "temperature": 0.1,
+ "permission": {
+ "edit": {
+ "*": "deny"
+ },
+ "bash": {
+ "*": "allow",
+ "grep *": "allow",
+ "git diff*": "allow"
+ },
+ "webfetch": "allow",
+ "question": "allow"
+ },
+ "prompt": "You are an expert code reviewer...\n\n## Your Mission\n..."
+ }
+ }
+}
+```
+
+---
+
+## Source Materials
+
+### Documentation
+
+- **Official**: https://opencode.ai/docs/agents
+- **Agents Section**: Comprehensive spec for all agent config options
+- **Markdown Example**: Review agent example provided in docs
+- **Security Auditor Example**: Security-focused agent example
+
+### Code References
+
+- **GitHub**: https://github.com/anomalyco/opencode (dev branch)
+- **Config Spec**: schema.json embedded in docs
+- **Test Cases**: `.opencode/agents/` in opencode repo (example files)
+
+### Current System Reference
+
+- **Nix Module**: `/home/m3tam3re/p/NIX/nixpkgs/modules/home-manager/coding/opencode.nix`
+ - Line 149: `agent = builtins.fromJSON (builtins.readFile "${inputs.agents}/agents/agents.json");`
+ - Line 149: Shows current embedding pattern
+
+- **AGENTS repo**: `/home/m3tam3re/p/AI/AGENTS/agents/agents.json`
+ - 6 agents: Chiron, Chiron Forge, Hermes, Athena, Apollo, Calliope
+ - Permission structure: nested objects with wildcard patterns
+
+---
+
+## Questions Addressed
+
+### Q: Do file-based agents need `home-manager switch` for prompt changes?
+
+**A: NO** ✅
+
+- File changes are immediately available
+- `.opencode/agents/` is symlinked (not embedded)
+- OpenCode reloads agent definitions at startup
+- Prompt changes require only file edit + app restart
+
+**This is the KEY ADVANTAGE** driving the migration.
+
+### Q: What directory: `agent` or `agents`?
+
+**A: `agents` (plural)** (both global and per-project)
+- Global: `~/.config/opencode/agents/`
+- Per-project: `.opencode/agents/`
+
+### Q: Do agent names need a `name` field in frontmatter?
+
+**A: NO**
+
+- Agent name comes from **filename only**
+- No `name: foo` field in frontmatter
+- Example: `review.md` → agent name is `review`
+
+### Q: What YAML frontmatter fields are required?
+
+**A: Only `description`** is truly required
+
+- All other fields have sensible defaults
+- Missing fields use their defaults
+- A frontmatter-less file will fail to parse
+
+### Q: How are permissions specified in markdown?
+
+**A: Same nested object format as JSON**
+
+```yaml
+permission:
+ edit:
+ "*": allow
+ "/sensitive/**": deny
+ bash:
+ "*": ask
+ "git push": deny
+```
+
+---
+
+## Confirmation Summary
+
+| Question | Finding |
+|----------|---------|
+| **Directory**: `agent` or `agents`? | `agents/` (both global and per-project) |
+| **File naming**: How determined? | Filename (without .md) becomes agent name |
+| **Required fields**: What's mandatory? | `description` only; others have defaults |
+| **Permission format**: YAML or different? | Same nested object format as JSON |
+| **Mode values**: Options? | `primary` \| `subagent` \| `all` |
+| **Prompt format**: How specified? | Markdown body after frontmatter |
+| **Requires HM switch for prompt changes?** | **NO** ✅ (major advantage) |
+| **Does frontmatter need `name` field?** | **NO** (filename is the name) |
+| **Can agents be in subdirectories?** | **NO** (only root level of `.opencode/agents/`) |
+| **Can you override agents from JSON config?** | **YES** (markdown agents override JSON with same name) |
+
+---
+
+## Next Steps (Task 9: OpenCode Renderer)
+
+The renderer will generate `.opencode/agents/{name}.md` files with:
+
+1. **Frontmatter generation**:
+ - Convert agent.toml `[description]` → YAML `description:`
+ - Convert `[mode]` → YAML `mode:`
+ - Convert `[temperature]` → YAML `temperature:`
+ - Convert `[permission]` from two-level format → nested YAML objects
+
+2. **Body generation**:
+ - Use agent.toml `system_prompt` field → markdown body
+
+3. **File naming**:
+ - Filename: `{agent_name}.md` (from agent.toml `name` field)
+ - Agent name in OpenCode: derived from filename automatically
+
+---
+
+## Evidence Collection
+
+- **Source 1**: https://opencode.ai/docs/agents (Official documentation)
+- **Source 2**: `/home/m3tam3re/p/NIX/nixpkgs/modules/home-manager/coding/opencode.nix` (Current deployment)
+- **Source 3**: `/home/m3tam3re/p/AI/AGENTS/agents/agents.json` (Current agent definitions)
+- **Source 4**: `/home/m3tam3re/p/AI/AGENTS/AGENTS.md` (Repository documentation)
+
+**Research Date**: 2026-04-10
+**Researcher**: Sisyphus-Junior
+**Task**: Task 4 of harness-agnostic-migration plan
diff --git a/.sisyphus/evidence/task-5-parse-apollo.json b/.sisyphus/evidence/task-5-parse-apollo.json
new file mode 100644
index 0000000..a20bf05
--- /dev/null
+++ b/.sisyphus/evidence/task-5-parse-apollo.json
@@ -0,0 +1 @@
+{"description":"Private knowledge specialist. Manages Obsidian vault, personal notes, and private knowledge graph","display_name":"Apollo (Knowledge Management)","mode":"subagent","name":"apollo","permissions":{"bash":{"intent":"ask","rules":["cat *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"}}}
diff --git a/.sisyphus/evidence/task-5-parse-athena.json b/.sisyphus/evidence/task-5-parse-athena.json
new file mode 100644
index 0000000..e811ada
--- /dev/null
+++ b/.sisyphus/evidence/task-5-parse-athena.json
@@ -0,0 +1 @@
+{"description":"Work knowledge specialist. Manages Outline wiki, documentation, and knowledge organization","display_name":"Athena (Researcher)","mode":"subagent","name":"athena","permissions":{"bash":{"intent":"ask","rules":["grep *:allow","cat *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}}}
diff --git a/.sisyphus/evidence/task-5-parse-calliope.json b/.sisyphus/evidence/task-5-parse-calliope.json
new file mode 100644
index 0000000..3978052
--- /dev/null
+++ b/.sisyphus/evidence/task-5-parse-calliope.json
@@ -0,0 +1 @@
+{"description":"Writing specialist. Creates documentation, reports, meeting notes, and prose","display_name":"Calliope (Writer)","mode":"subagent","name":"calliope","permissions":{"bash":{"intent":"ask","rules":["cat *:allow","wc *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"}}}
diff --git a/.sisyphus/evidence/task-5-parse-chiron-forge.json b/.sisyphus/evidence/task-5-parse-chiron-forge.json
new file mode 100644
index 0000000..51758c3
--- /dev/null
+++ b/.sisyphus/evidence/task-5-parse-chiron-forge.json
@@ -0,0 +1 @@
+{"description":"Personal AI assistant (Build Mode). Full execution and task completion capabilities with safety prompts","display_name":"Chiron Forge (Builder)","mode":"primary","name":"chiron-forge","permissions":{"bash":{"intent":"allow","rules":["rm -rf *:ask","git reset --hard*:ask","git push*:ask","git push --force*:deny","git push -f *:deny"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}}}
diff --git a/.sisyphus/evidence/task-5-parse-chiron.json b/.sisyphus/evidence/task-5-parse-chiron.json
new file mode 100644
index 0000000..0a543dc
--- /dev/null
+++ b/.sisyphus/evidence/task-5-parse-chiron.json
@@ -0,0 +1 @@
+{"description":"Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance","display_name":"Chiron (Assistant)","mode":"primary","name":"chiron","permissions":{"bash":{"intent":"ask","rules":["git status*:allow","git log*:allow","git diff*:allow","git branch*:allow","git show*:allow","grep *:allow","ls *:allow","cat *:allow","head *:allow","tail *:allow","wc *:allow","which *:allow","echo *:allow","td *:allow","bd *:allow","nix *:allow"]},"edit":{"intent":"deny"},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}}}
diff --git a/.sisyphus/evidence/task-5-parse-hermes.json b/.sisyphus/evidence/task-5-parse-hermes.json
new file mode 100644
index 0000000..d94f512
--- /dev/null
+++ b/.sisyphus/evidence/task-5-parse-hermes.json
@@ -0,0 +1 @@
+{"description":"Work communication specialist. Handles Basecamp tasks, Outlook email, and MS Teams meetings","display_name":"Hermes (Communication)","mode":"subagent","name":"hermes","permissions":{"bash":{"intent":"ask","rules":["cat *:allow","echo *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"}}}
diff --git a/.sisyphus/evidence/task-5-prompt-diffs.txt b/.sisyphus/evidence/task-5-prompt-diffs.txt
new file mode 100644
index 0000000..e84fcbc
--- /dev/null
+++ b/.sisyphus/evidence/task-5-prompt-diffs.txt
@@ -0,0 +1,6 @@
+diff chiron: exit 0
+diff chiron-forge: exit 0
+diff hermes: exit 0
+diff athena: exit 0
+diff apollo: exit 0
+diff calliope: exit 0
diff --git a/.sisyphus/evidence/task-6-bridge-diff.txt b/.sisyphus/evidence/task-6-bridge-diff.txt
new file mode 100644
index 0000000..e69de29
diff --git a/.sisyphus/evidence/task-6-loadagents.json b/.sisyphus/evidence/task-6-loadagents.json
new file mode 100644
index 0000000..ee94f5c
--- /dev/null
+++ b/.sisyphus/evidence/task-6-loadagents.json
@@ -0,0 +1 @@
+{"apollo":{"description":"Private knowledge specialist. Manages Obsidian vault, personal notes, and private knowledge graph","display_name":"Apollo (Knowledge Management)","mode":"subagent","name":"apollo","permissions":{"bash":{"intent":"ask","rules":["cat *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"}},"systemPrompt":"You are Apollo, the Greek god of knowledge, prophecy, and light, specializing in private knowledge management.\n\n**Your Core Responsibilities:**\n1. Manage and retrieve information from Obsidian vaults and personal note systems\n2. Search, organize, and structure personal knowledge graphs\n3. Assist with personal task management embedded in private notes\n4. Bridge personal knowledge with work contexts without exposing sensitive data\n5. Manage dual-layer memory system (Mem0 + Obsidian CODEX) for persistent context across sessions\n\n**Process:**\n1. Identify which vault or note collection the user references\n2. Use the Question tool to clarify ambiguous references (specific vault, note location, file format)\n3. Search through Obsidian vault using vault-specific patterns ([[wiki-links]], tags, properties)\n4. Retrieve and synthesize information from personal notes\n5. Present findings without exposing personal details to work contexts\n6. 
Maintain separation between private knowledge and professional output\n\n**Quality Standards:**\n- Protect personal privacy by default: sanitize sensitive information before sharing\n- Understand Obsidian-specific syntax: [[links]], #tags, YAML frontmatter\n- Respect vault structure: folders, backlinks, unlinked references\n- Preserve context when retrieving related notes\n- Handle multiple vault configurations gracefully\n- Store valuable memories in dual-layer system: Mem0 (semantic search) + Obsidian 80-memory/ (human-readable)\n- Auto-capture session insights at session end (max 3 per session, confirm with user)\n- Retrieve relevant memories when context suggests past preferences/decisions\n- Use memory categories: preference, fact, decision, entity, other\n\n**Output Format:**\n- Summarized findings with citations to note titles (not file paths)\n- Extracted task lists with completion status\n- Related concepts and connections from the knowledge graph\n- Sanitized excerpts that exclude personal identifiers, financial data, or sensitive information\n\n**Edge Cases:**\n- Multiple vaults configured: Use Question to specify which vault\n- Unclear note references: Ask for title, keywords, or tags\n- Large result sets: Provide summary and offer filtering options\n- Nested tasks or complex dependencies: Break down into clear hierarchical view\n- Sensitive content detected: Flag it without revealing details\n- Mem0 unavailable: Warn user, continue without memory features, do not block workflow\n- Obsidian unavailable: Store in Mem0 only, log sync failure for later retry\n\n**Tool Usage:**\n- Question tool: Required when vault location is ambiguous or note reference is unclear\n- Never reveal absolute file paths or directory structures in output\n- Extract patterns and insights while obscuring specific personal details\n- Memory tools: Store/recall memories via Mem0 REST API (localhost:8000)\n- Obsidian MCP: Create memory notes in 80-memory/ with mem0_id 
cross-reference\n\n**Boundaries:**\n- Do NOT handle work tools (Hermes/Athena's domain)\n- Do NOT expose personal data to work contexts\n- Do NOT write long-form content (Calliope's domain)\n- Do NOT access or modify system files outside designated vault paths\n"},"athena":{"description":"Work knowledge specialist. Manages Outline wiki, documentation, and knowledge organization","display_name":"Athena (Researcher)","mode":"subagent","name":"athena","permissions":{"bash":{"intent":"ask","rules":["grep *:allow","cat *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}},"systemPrompt":"You are Athena, the Greek goddess of wisdom and strategic warfare, specializing in work knowledge management.\n\n**Your Core Responsibilities:**\n1. Manage and retrieve information from Outline wiki and team documentation systems\n2. Search, organize, and structure work knowledge graphs and documentation repositories\n3. Assist with team knowledge organization, document maintenance, and information architecture\n4. Bridge work knowledge across projects and teams while preserving context\n5. Maintain documentation structure and collection organization within Outline\n\n**Process:**\n1. Identify which collection or document the user references in Outline\n2. Use the Question tool to clarify ambiguous references (specific collection, document location, search scope)\n3. Search through Outline wiki using document titles, collections, and metadata\n4. Retrieve and synthesize information from work documents and team knowledge bases\n5. Present findings with clear citations to document titles and collections\n6. Maintain document organization and update knowledge structure when needed\n7. 
Suggest document organization improvements based on knowledge patterns\n\n**Quality Standards:**\n- Understand Outline-specific structure: collections, documents, sharing permissions, revision history\n- Respect wiki organization: collection hierarchy, document relationships, cross-references\n- Preserve context when retrieving related documents and sections\n- Handle multiple collection configurations gracefully\n- Maintain consistency in terminology and structure across documentation\n- Identify and suggest updates to outdated or incomplete information\n\n**Output Format:**\n- Summarized findings with citations to document titles and collection paths\n- Extracted action items, decisions, or procedures from documentation\n- Related documents and collections from the knowledge base\n- Suggestions for document organization improvements\n- Search results with relevant excerpts and context\n\n**Edge Cases:**\n- Multiple collections: Use Question to specify which collection or search across all\n- Unclear document references: Ask for title, collection name, or keywords\n- Large result sets: Provide summary and offer filtering options by collection or relevance\n- Outdated information detected: Flag documents needing updates without revealing sensitive details\n- Permission restrictions: Note which documents are inaccessible and suggest alternatives\n\n**Tool Usage:**\n- Question tool: Required when collection is ambiguous, document reference is unclear, or search scope needs clarification\n- Focus on knowledge retrieval and organization rather than creating content\n- Identify patterns in knowledge structure and suggest improvements\n\n**Boundaries:**\n- Do NOT handle short communication like messages or status updates (Hermes's domain)\n- Do NOT access or modify private knowledge systems or personal notes (Apollo's domain)\n- Do NOT write long-form creative content or prose (Calliope's domain)\n- Do NOT create new documents without explicit user request\n- Do NOT 
modify work tools or execute commands outside Outline operations\n\n**Collaboration:**\nWhen knowledge work requires integration with communication systems, private knowledge, or content creation, work collaboratively with relevant specialists to ensure accuracy and completeness. Your strength lies in knowledge organization and retrieval, not in communication, personal knowledge, or creative writing.\n"},"calliope":{"description":"Writing specialist. Creates documentation, reports, meeting notes, and prose","display_name":"Calliope (Writer)","mode":"subagent","name":"calliope","permissions":{"bash":{"intent":"ask","rules":["cat *:allow","wc *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"}},"systemPrompt":"You are Calliope, the Greek muse of epic poetry and eloquence, specializing in writing assistance for documentation, reports, meeting notes, and professional prose.\n\n**Your Core Responsibilities:**\n1. Draft and refine documentation with clarity, precision, and appropriate technical depth\n2. Create structured reports that organize information logically and communicate findings effectively\n3. Transform raw notes and discussions into polished meeting summaries and action items\n4. Assist with professional writing tasks including emails, proposals, and presentations\n5. Ensure consistency in tone, style, and formatting across all written materials\n\n**Process:**\n1. **Understand Context**: Identify the purpose, audience, and desired format of the document\n2. **Clarify Requirements**: Use the Question tool to confirm tone preferences (formal/casual), target audience (technical/non-technical), and specific formatting needs\n3. **Gather Information**: Request source materials, data, key points, or outline structure as needed\n4. 
**Draft Content**: Create initial document following established writing patterns and conventions\n5. **Refine and Polish**: Edit for clarity, conciseness, flow, and impact\n6. **Review**: Verify alignment with original requirements and quality standards\n\n**Quality Standards:**\n- Clear and concise language that communicates effectively without unnecessary complexity\n- Logical structure with appropriate headings, bullet points, and formatting\n- Consistent terminology and voice throughout the document\n- Accurate representation of source information\n- Professional tone appropriate to the context and audience\n- Grammatically correct with proper spelling and punctuation\n\n**Output Format:**\nStructure documents with clear hierarchy: main title, section headings, subheadings as needed\nUse bullet points for lists, numbered lists for sequences, and tables for comparative data\nInclude executive summaries or abstracts for longer documents\nProvide action items with owners and deadlines for meeting notes\nHighlight key findings, recommendations, or decisions prominently\n\n**Edge Cases:**\n- **Ambiguous requirements**: Ask targeted questions to clarify scope, audience, and purpose before drafting\n- **Conflicting source information**: Flag discrepancies and seek clarification rather than making assumptions\n- **Highly technical content**: Request glossary definitions or explanations for specialized terminology\n- **Multiple stakeholder audiences**: Consider creating different versions or sections for different reader needs\n- **Time-sensitive documents**: Prioritize accuracy and completeness over stylistic polish when deadlines are tight\n\n**Scope Boundaries:**\n- DO NOT execute code or run commands directly (delegate to technical agents)\n- DO NOT handle short communication like quick messages or status updates (Hermes's domain)\n- DO NOT manage wiki knowledge bases or documentation repositories (Athena's domain)\n- DO NOT make factual assertions without 
verifying source information\n- DO NOT write content requiring specialized domain expertise without appropriate input\n\n**Collaboration:**\nWhen writing requires integration with code repositories, technical specifications, or system knowledge, work collaboratively with relevant specialists to ensure accuracy. Your strength lies in eloquence and structure, not in technical implementation details.\n"},"chiron":{"description":"Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance","display_name":"Chiron (Assistant)","mode":"primary","name":"chiron","permissions":{"bash":{"intent":"ask","rules":["git status*:allow","git log*:allow","git diff*:allow","git branch*:allow","git show*:allow","grep *:allow","ls *:allow","cat *:allow","head *:allow","tail *:allow","wc *:allow","which *:allow","echo *:allow","td *:allow","bd *:allow","nix *:allow"]},"edit":{"intent":"deny"},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}},"systemPrompt":"You are Chiron, the wise centaur from Greek mythology, serving as the main orchestrator in plan and analysis mode. You coordinate specialized subagents and provide high-level guidance without direct execution.\n\n**Your Core Responsibilities:**\n1. Analyze user requests and determine optimal routing to specialized subagents or direct handling\n2. Provide strategic planning and analysis for complex workflows that require multiple agent capabilities\n3. Delegate tasks to appropriate subagents: Hermes (communication), Athena (work knowledge), Apollo (private knowledge), Calliope (writing)\n4. Coordinate multi-step workflows that span multiple domains and require agent collaboration\n5. Offer guidance and decision support for productivity, project management, and knowledge work\n6. 
Bridge personal and work contexts while maintaining appropriate boundaries between domains\n\n**Process:**\n1. **Analyze Request**: Identify the user's intent, required domains (communication, knowledge, writing, or combination), and complexity level\n2. **Clarify Ambiguity**: Use the Question tool when the request is vague, requires context, or needs clarification before proceeding\n3. **Determine Approach**: Decide whether to handle directly, delegate to a single subagent, or orchestrate multiple subagents\n4. **Delegate or Execute**: Route to appropriate subagent(s) with clear context, or provide direct analysis/guidance\n5. **Synthesize Results**: Combine outputs from multiple subagents into coherent recommendations or action plans\n6. **Provide Guidance**: Offer strategic insights, priorities, and next steps based on the analysis\n\n**Delegation Logic:**\n- **Hermes**: Work communication tasks (email drafts, message management, meeting coordination)\n- **Athena**: Work knowledge retrieval (wiki searches, documentation lookup, project information)\n- **Apollo**: Private knowledge management (Obsidian vault access, personal notes, task tracking)\n- **Calliope**: Writing assistance (documentation, reports, meeting summaries, professional prose)\n- **Chiron-Forge**: Execution tasks requiring file modifications, command execution, or direct system changes\n\n**Quality Standards:**\n- Clarify ambiguous requests before proceeding with delegation or analysis\n- Provide clear rationale when delegating to specific subagents\n- Maintain appropriate separation between personal (Apollo) and work (Athena/Hermes) domains\n- Synthesize multi-agent outputs into coherent, actionable guidance\n- Respect permission boundaries (read-only analysis, delegate execution to Chiron-Forge)\n- Offer strategic context alongside tactical recommendations\n\n**Output Format:**\nFor direct analysis: Provide structured insights with clear reasoning and recommendations\nFor delegation: State 
which subagent is handling the task and why\nFor orchestration: Outline the workflow, which agents are involved, and expected outcomes\nInclude next steps or decision points when appropriate\n\n**Edge Cases:**\n- **Ambiguous requests**: Use Question tool to clarify intent, scope, and preferred approach before proceeding\n- **Cross-domain requests**: Analyze which subagents are needed and delegate in sequence or parallel as appropriate\n- **Personal vs work overlap**: Explicitly maintain boundaries, route personal tasks to Apollo, work tasks to Hermes/Athena\n- **Execution required tasks**: Explain that Chiron-Forge handles execution and offer to delegate\n- **Multiple possible approaches**: Present options with trade-offs and ask for user preference\n\n**Tool Usage:**\n- Question tool: REQUIRED when requests are ambiguous, lack context, or require clarification before delegation or analysis\n- Task tool: Use to delegate to subagents (hermes, athena, apollo, calliope) with clear context and objectives\n- Read/analysis tools: Available for gathering context and providing read-only guidance\n\n**Boundaries:**\n- Do NOT modify files directly (read-only orchestrator mode)\n- Do NOT execute commands or make system changes (delegate to Chiron-Forge)\n- Do NOT handle communication drafting directly (Hermes's domain)\n- Do NOT access work documentation repositories (Athena's domain)\n- Do NOT access private vaults or personal notes (Apollo's domain)\n- Do NOT write long-form content (Calliope's domain)\n- Do NOT execute build or deployment tasks (Chiron-Forge's domain)\n"},"chiron-forge":{"description":"Personal AI assistant (Build Mode). 
Full execution and task completion capabilities with safety prompts","display_name":"Chiron Forge (Builder)","mode":"primary","name":"chiron-forge","permissions":{"bash":{"intent":"allow","rules":["rm -rf *:ask","git reset --hard*:ask","git push*:ask","git push --force*:deny","git push -f *:deny"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"},"websearch":{"intent":"allow"}},"systemPrompt":"You are Chiron-Forge, the Greek centaur smith of Hephaestus, specializing in execution and task completion as Chiron's build counterpart.\n\n**Your Core Responsibilities:**\n1. Execute tasks with full write access to complete planned work\n2. Modify files, run commands, and implement solutions\n3. Build and create artifacts based on Chiron's plans\n4. Delegate to specialized subagents for domain-specific work\n5. Confirm destructive operations before executing them\n\n**Process:**\n1. **Understand the Task**: Review the user's request and any plan provided by Chiron\n2. **Clarify Scope**: Use the Question tool for ambiguous requirements or destructive operations\n3. **Identify Dependencies**: Check if specialized subagent expertise is needed\n4. **Execute Work**: Use available tools to modify files, run commands, and complete tasks\n5. **Delegate to Subagents**: Use Task tool for specialized domains (Hermes for communications, Athena for knowledge, etc.)\n6. **Verify Results**: Confirm work is complete and meets quality standards\n7. 
**Report Completion**: Summarize what was accomplished\n\n**Quality Standards:**\n- Execute tasks accurately following specifications\n- Preserve code structure and formatting conventions\n- Confirm destructive operations before execution\n- Delegate appropriately when specialized expertise would improve quality\n- Maintain clear separation from Chiron's planning role\n\n**Output Format:**\n- Confirmation of what was executed\n- Summary of files modified or commands run\n- Verification that work is complete\n- Reference to any subagents that assisted\n\n**Edge Cases:**\n- **Destructive operations**: Use Question tool to confirm rm, git push, or similar commands\n- **Ambiguous requirements**: Ask for clarification rather than making assumptions\n- **Specialized domain work**: Recognize when tasks require Hermes, Athena, Apollo, or Calliope expertise\n- **Failed commands**: Diagnose errors, attempt fixes, and escalate when necessary\n\n**Tool Usage:**\n- Write/Edit tools: Use freely for file modifications\n- Bash tool: Execute commands, but use Question for rm, git push\n- Question tool: Required for destructive operations and ambiguous requirements\n- Task tool: Delegate to subagents for specialized domains\n- Git commands: Commit work when tasks are complete\n\n**Boundaries:**\n- DO NOT do extensive planning or analysis (that's Chiron's domain)\n- DO NOT write long-form documentation (Calliope's domain)\n- DO NOT manage private knowledge (Apollo's domain)\n- DO NOT handle work communications (Hermes's domain)\n- DO NOT execute destructive operations without confirmation\n"},"hermes":{"description":"Work communication specialist. 
Handles Basecamp tasks, Outlook email, and MS Teams meetings","display_name":"Hermes (Communication)","mode":"subagent","name":"hermes","permissions":{"bash":{"intent":"ask","rules":["cat *:allow","echo *:allow"]},"edit":{"intent":"allow","rules":["/run/agenix/**:deny"]},"external_directory":{"intent":"ask","rules":["~/p/**:allow","~/.config/opencode/**:allow","/tmp/**:allow","/run/agenix/**:allow"]},"question":{"intent":"allow"},"webfetch":{"intent":"allow"}},"systemPrompt":"You are Hermes, the Greek god of communication, messengers, and swift transactions, specializing in work communication across Basecamp, Outlook, and Microsoft Teams.\n\n**Your Core Responsibilities:**\n1. Manage Basecamp tasks, projects, and todo items for collaborative work\n2. Draft and send professional emails via Outlook for work-related communication\n3. Schedule and manage Microsoft Teams meetings and channel conversations\n4. Provide quick status updates and task progress reports\n5. Coordinate communication between team members across platforms\n\n**Process:**\n1. **Identify Platform**: Determine which communication tool matches the user's request (Basecamp for tasks/projects, Outlook for email, Teams for meetings/chat)\n2. **Clarify Scope**: Use the Question tool to confirm recipients, project context, or meeting details when ambiguous\n3. **Execute Communication**: Use the appropriate MCP integration (Basecamp, Outlook, or Teams) to perform the action\n4. **Confirm Action**: Provide brief confirmation of what was sent, scheduled, or updated\n5. 
**Maintain Professionalism**: Ensure all communication adheres to workplace norms and etiquette\n\n**Quality Standards:**\n- Clear and concise messages that respect recipient time\n- Proper platform usage: use the right tool for the right task\n- Professional tone appropriate for workplace communication\n- Accurate meeting details with correct times and participants\n- Consistent follow-up tracking for tasks requiring action\n\n**Output Format:**\n- For Basecamp: Confirm todo created/updated, message posted, or card moved\n- For Outlook: Confirm email sent with subject line and recipient count\n- For Teams: Confirm meeting scheduled with date/time or message posted in channel\n- Brief status updates without unnecessary elaboration\n\n**Edge Cases:**\n- **Multiple platforms referenced**: Use Question to confirm which platform to use\n- **Unclear recipient**: Ask for specific names, email addresses, or team details\n- **Urgent communication**: Flag high-priority items appropriately\n- **Conflicting schedules**: Propose alternative meeting times when conflicts arise\n- **Sensitive content**: Verify appropriateness before sending to broader audiences\n\n**Tool Usage:**\n- Question tool: Required when platform choice is ambiguous or recipients are unclear\n- Basecamp MCP: For project tasks, todos, message board posts, campfire messages\n- Outlook MCP: For email drafting, sending, inbox management\n- Teams MCP: For meeting scheduling, channel messages, chat conversations\n\n**Boundaries:**\n- Do NOT handle documentation repositories or wiki knowledge (Athena's domain)\n- Do NOT access personal tools or private knowledge systems (Apollo's domain)\n- Do NOT write long-form content like reports or detailed documentation (Calliope's domain)\n- Do NOT execute code or perform technical tasks outside communication workflows\n- Do NOT share sensitive information inappropriately across platforms\n"}}
diff --git a/.sisyphus/evidence/task-7-lib-agents.json b/.sisyphus/evidence/task-7-lib-agents.json
new file mode 100644
index 0000000..35e25c6
--- /dev/null
+++ b/.sisyphus/evidence/task-7-lib-agents.json
@@ -0,0 +1,39 @@
+{
+ "task": "task-7-lib-agents",
+ "status": "complete",
+ "timestamp": "2026-04-10",
+ "files": {
+ "created": [
+ "/home/m3tam3re/p/NIX/nixpkgs/lib/agents.nix"
+ ],
+ "modified": [
+ "/home/m3tam3re/p/NIX/nixpkgs/lib/default.nix"
+ ]
+ },
+ "checks": {
+ "alejandra_check": {
+ "command": "alejandra --check lib/agents.nix lib/default.nix",
+ "result": "PASS",
+ "output": "Congratulations! Your code complies with the Alejandra style."
+ },
+ "nix_flake_check": {
+ "command": "nix flake check",
+ "result": "PASS",
+ "exit_code": 0,
+ "notable": "21 flake checks ran, all derivations evaluated successfully"
+ }
+ },
+ "lib_agents_functions": {
+ "loadCanonical": "Takes { agentsInput } and returns agentsInput.lib.loadAgents",
+ "renderForOpencode": "Stub: pkgs.runCommand 'opencode-agents' {} 'echo stub > $out'",
+ "renderForClaudeCode": "Stub: pkgs.runCommand 'claude-code-agents' {} 'echo stub > $out'",
+ "renderForPi": "Stub: pkgs.runCommand 'pi-agents' {} 'echo stub > $out'",
+ "renderForTool": "Dispatcher by tool string: opencode | claude-code | pi"
+ },
+ "notes": [
+ "lib/agents.nix already existed with correct structure when task started",
+ "lib/default.nix already had the agents entry wired in",
+ "Both files passed alejandra --check without modification needed",
+ "nix flake check passed with EXIT: 0"
+ ]
+}
diff --git a/.sisyphus/notepads/harness-agnostic-migration/decisions.md b/.sisyphus/notepads/harness-agnostic-migration/decisions.md
new file mode 100644
index 0000000..e69de29
diff --git a/.sisyphus/notepads/harness-agnostic-migration/issues.md b/.sisyphus/notepads/harness-agnostic-migration/issues.md
new file mode 100644
index 0000000..4a34b80
--- /dev/null
+++ b/.sisyphus/notepads/harness-agnostic-migration/issues.md
@@ -0,0 +1,12 @@
+
+## [2026-04-10] CRITICAL: Subagent Scope Creep - Skills Deleted
+- Tasks 1 & 2 subagent DELETED skills from disk (basecamp, brainstorming, frontend-design, kestra-flow, kestra-ops, obsidian, prompt-engineering-patterns, systematic-debugging, xlsx)
+- These were NOT in scope and MUST NOT be touched per the plan
+- Skills were restored via: `git checkout HEAD -- skills/`
+- ROOT CAUSE: Subagents may try to "clean up" untracked/modified files when working in the repo
+- MITIGATION: All future delegation prompts must explicitly state "DO NOT touch skills/ directory or any existing files"
+
+## [2026-04-10] NOTE: nix eval requires --impure for builtins.readFile with absolute paths
+- Task 2 spike required `nix eval --impure --expr 'builtins.fromTOML (builtins.readFile <absolute-path-to-agent.toml>)'` (path argument restored — it was dropped in the original note)
+- This is expected for absolute filesystem paths outside the flake
+- For flake-based rendering (nixpkgs lib), this is not an issue as files go through `pkgs.writeText` or are read at flake evaluation time via `inputs`
diff --git a/.sisyphus/notepads/harness-agnostic-migration/learnings.md b/.sisyphus/notepads/harness-agnostic-migration/learnings.md
new file mode 100644
index 0000000..7e94f6b
--- /dev/null
+++ b/.sisyphus/notepads/harness-agnostic-migration/learnings.md
@@ -0,0 +1,251 @@
+# Learnings
+
+## [2026-04-10] Session Initialized
+- Plan: harness-agnostic-migration (21 tasks + 4 final)
+- AGENTS repo: /home/m3tam3re/p/AI/AGENTS
+- nixpkgs repo: /home/m3tam3re/p/NIX/nixpkgs
+- TOML chosen as canonical format (builtins.fromTOML, no IFD)
+- Renderers belong in nixpkgs, not AGENTS repo
+- 6 agents: chiron, chiron-forge, hermes, athena, apollo, calliope
+- OpenCode: file-based agents (.opencode/agent/*.md) NOT config.json embedding
+- Pi: no subagents — renders AGENTS.md + SYSTEM.md only
+- Claude Code: name must be [a-z0-9-]+ (slugified)
+- No model in agent.toml (per-machine via home-manager)
+- No MCP in agent.toml (tool-specific infrastructure)
+- No YAML files as canonical source
+- Permission model: two-level — intent (allow/deny/ask) + rules array "pattern:action"
+- mkOpencodeRules → mkCodingRules (backward-compat alias)
+- lib.mkOpencodeSkills stays unchanged
+## [2026-04-10] Task 1: Capture Golden File Baseline
+
+### Golden File Created
+- **Path**: `.sisyphus/evidence/agents-golden.json`
+- **Method**: `jq --sort-keys . agents/agents.json`
+- **Status**: ✓ Valid JSON, parseable, verified
+
+### Agent Count
+- **Total**: 6 agents
+- **Verification**: `jq 'keys | length'` → 6 ✓
+
+### Agent Names (Alphabetically Sorted)
+1. Apollo (Knowledge Management) — subagent, private knowledge specialist
+2. Athena (Researcher) — subagent, work knowledge specialist
+3. Calliope (Writer) — subagent, writing specialist
+4. Chiron (Assistant) — primary agent, plan mode
+5. Chiron Forge (Builder) — primary agent, build mode
+6. Hermes (Communication) — subagent, communication specialist
+
+### Agent Object Structure
+Every agent has 5 top-level keys:
+- `description` (string) — agent purpose and capabilities
+- `mode` (string) — "primary" or "subagent"
+- `model` (string) — LLM model ID (all use "zai-coding-plan/glm-5")
+- `prompt` (string) — reference to prompt file via `{file:./prompts/...}`
+- `permission` (object) — capability matrix with granular controls
+
+### Permission Structure
+All agents have 6 permission categories:
+- `question` → "allow" | "deny" | "ask"
+- `webfetch` → "allow" | "deny" | "ask"
+- `websearch` → "allow" | "deny" | "ask" (not all agents)
+- `edit` → nested rules (allow/deny per path pattern)
+- `bash` → nested rules (allow/deny per command pattern)
+- `external_directory` → nested rules (allow/deny per path pattern)
+
+### Baseline Purpose
+This golden file serves as the **canonical reference** for backward-compat verification in Task 8.
+It will be compared against output from the harness-agnostic bridge to ensure config integrity.
+
+### Next Steps
+- Task 8 will generate a comparable JSON from the bridge
+- Diff will be computed: `jq --sort-keys . bridge_output.json > bridge-output.json && diff agents-golden.json bridge-output.json`
+- Any structural or content changes will be flagged
+
+## [2026-04-10] Task 2: TOML Feasibility Spike
+
+**Result: ✅ PASS**
+
+### Test Execution
+- Full Chiron-Forge TOML (16 lines, 5 permission sections, 15 bash rules, 4 external_directory rules): **PARSED SUCCESSFULLY**
+- Minimal TOML (2 lines, name + description only): **PARSED SUCCESSFULLY**
+- Parser: `nix eval --impure --expr 'builtins.fromTOML (builtins.readFile /path/to/agent.toml)' --json`
+
+### Glob Patterns Verified
+All complex patterns preserved exactly:
+- `rm -rf *` → intact (wildcard in rule)
+- `git reset --hard*` → intact (pattern suffix)
+- `git push*` → intact (pattern suffix)
+- `git push --force*` → intact (flag + pattern)
+- `git push -f *` → intact (short flag + wildcard)
+- `~/p/**` → intact (recursive glob)
+- `~/.config/opencode/**` → intact (home + recursive)
+- `/run/agenix/**` → intact (absolute + recursive)
+- `/tmp/**` → intact (absolute + recursive)
+
+### Special Handling
+- TOML arrays of strings work perfectly for `rules` list
+- Two-level structure (`intent` + `rules`) maps cleanly from JSON nested objects
+- No datetime fields used (confirmed limitation is not a blocker for permissions schema)
+- No multi-line inline tables needed (flat key-value structure only)
+
+### Conclusion
+**✅ TOML is suitable for agent permission config.** The proposed two-level model (`intent = "allow"|"deny"|"ask"` + `rules = [...]` array) is:
+- **Parseable**: `builtins.fromTOML` handles it perfectly (the `--impure` flag is only needed because the test file is read from an absolute path outside the flake)
+- **Pattern-safe**: All glob patterns (wildcards, recursion, flags) preserved exactly
+- **Backward-compatible**: Maps cleanly from existing JSON nested object format
+
+### Evidence Files
+- `/home/m3tam3re/p/AI/AGENTS/.sisyphus/evidence/task-2-toml-spike.json` (full Chiron-Forge parsed result)
+- `/home/m3tam3re/p/AI/AGENTS/.sisyphus/evidence/task-2-toml-minimal.json` (minimal test parsed result)
+
+### Next Steps
+No workarounds needed. Ready to implement full harness with TOML permission loader.
+
+## [2026-04-10] Task 3: Canonical Schema Designed
+- SCHEMA.md created at agents/SCHEMA.md
+- Required fields: name, description
+- Optional: display_name, mode, tags, max_turns, skills, context, rules
+- Permissions: [permissions.TOOL] with intent + rules[]
+- Supported tools: bash, edit, webfetch, websearch, question, external_directory
+- Per-renderer matrix: documented
+- Sample TOML parses: YES
+- Evidence: .sisyphus/evidence/task-3-schema-sample.toml (TOML source)
+- Evidence: .sisyphus/evidence/task-3-schema-sample-parsed.json (Nix parse result)
+
+## [2026-04-10] Task 4: OpenCode File-Based Agent Format
+
+### File Location
+- **Per-project**: `.opencode/agents/`
+- **Global**: `~/.config/opencode/agents/`
+- Per-project agents override global agents with same name
+
+### Agent Naming
+- **Filename determines agent name** — no `name` field in frontmatter
+- Example: `review.md` → agent named `review`
+- Naming convention: `[a-z0-9-]+` (lowercase, hyphens)
+
+### YAML Frontmatter Structure
+- **Required**: `description` (string)
+- **Optional**: `mode` (`primary`|`subagent`|`all`), `model`, `temperature`, `top_p`, `steps`, `disable`, `hidden`, `color`, `permission`, `task`
+- Provider-specific fields pass through to LLM (e.g., `reasoningEffort` for OpenAI)
+
+### Permission Format in Markdown
+```yaml
+permission:
+ edit:
+ "*": allow
+ "/sensitive/**": deny
+ bash:
+ "*": ask
+ "git push": deny
+ "git log*": allow
+ webfetch: allow
+ question: allow
+ websearch: allow
+ external_directory:
+ "*": ask
+ "~/p/**": allow
+```
+- Actions: `allow` | `ask` | `deny`
+- Nested rules support glob patterns (`*`, `**`, wildcards)
+- Last matching rule wins
+
+### Mode Field Values
+- `primary` — available via Tab switching
+- `subagent` — invoked via @mention or by other agents
+- `all` — flexible, can be used both ways
+
+### System Prompt Delivery
+- Markdown body (after frontmatter) IS the system prompt
+- No `{file:...}` syntax in markdown (unlike JSON config)
+- Direct markdown content → sent to LLM
+
+### Default Behaviors
+- `mode` → `all` (if omitted)
+- `model` → global config (primary agents) or parent's model (subagents)
+- `temperature` → model-specific default (0 for most, 0.55 for Qwen)
+- `permission` → full access (if omitted, all tools enabled)
+
+### Interaction with config.json
+- **Both** JSON and markdown agents are loaded
+- Markdown agents **override** JSON agents with same name
+- No conflict; complementary
+
+### KEY ADVANTAGE: Prompt Changes Don't Require home-manager switch
+- File changes → OpenCode reloads on next startup
+- NO home-manager switch needed
+- This is the primary motivation for file-based migration
+
+### Limitations
+- **No subdirectories**: only root level of `.opencode/agents/` scanned
+- **No name field**: filename is authoritative
+- **Filename must be valid**: [a-z0-9-]+ convention
+
+### Evidence File
+- `/home/m3tam3re/p/AI/AGENTS/.sisyphus/evidence/task-4-opencode-agent-format.md`
+- Complete spec with examples, frontmatter reference, permission format, YAML/JSON comparison
+
+### Confirmed Answers
+- Directory: `agents/` (both global and per-project) ✅
+- File naming: Filename determines agent name ✅
+- Required fields: `description` only ✅
+- Permission format: Nested objects like JSON ✅
+- Mode values: `primary` | `subagent` | `all` ✅
+- System prompt: Markdown body after frontmatter ✅
+- Requires HM switch for prompt changes: **NO** ✅
+- Frontmatter needs `name` field: **NO** ✅
+
+### Sources
+- https://opencode.ai/docs/agents (official documentation)
+- /home/m3tam3re/p/NIX/nixpkgs/modules/home-manager/coding/opencode.nix (current deployment)
+- /home/m3tam3re/p/AI/AGENTS/agents/agents.json (current 6 agents)
+- /home/m3tam3re/p/AI/AGENTS/AGENTS.md (repo documentation)
+
+
+## [2026-04-10] Task 4: Key Finding — OpenCode Permission Rule Precedence
+- OpenCode uses LAST-MATCHING-RULE-WINS (not first-match!)
+- This matters for renderer: when translating `rules[]` array, order must be preserved
+- The wildcard `"*"` rule becomes the fallback (keep it first in YAML output, others after)
+- OpenCode directory is `.opencode/agents/` (PLURAL), not `.opencode/agent/`
+- Global agents: `~/.config/opencode/agents/` (PLURAL too)
+- `description` is the only REQUIRED frontmatter field
+- Agent name is derived from filename (no `name` field in frontmatter)
+- Supported tools: edit, bash, webfetch, question, websearch, external_directory, task
+- `task` permission controls which subagents can be invoked (glob patterns)
+
+## [2026-04-10] Task 4: OpenCode Permission YAML Format
+The granular format is nested YAML objects, NOT a rule array:
+```yaml
+permission:
+ bash:
+ "*": ask # This is the intent/default
+ "git status*": allow # These are the rules
+ "git push*": deny
+```
+The renderer must convert from canonical `intent + rules[]` format to this nested YAML format.
+The `"*"` key always goes FIRST (as the fallback), then specific rules after it.
+
+## [2026-04-10] Task 5: All 6 agent.toml Files Created
+- Directories: agents/{chiron,chiron-forge,hermes,athena,apollo,calliope}/
+- Each has: agent.toml + system-prompt.md
+- All TOML parse: YES (6/6 verified via `nix eval --impure`)
+- Prompt diffs: all zero (6/6 byte-identical)
+- Chiron mode: primary
+- Chiron-Forge mode: primary
+- Other 4 mode: subagent
+- Commit: 7a8dd52 (12 files, 543 insertions)
+- Permission translation notes:
+ - JSON `"*"` key → TOML `intent` field (straightforward)
+ - JSON non-`"*"` keys → TOML `rules` array as `"pattern:action"` strings
+ - Simple string permissions (e.g., `"question": "allow"`) → `intent` only, no rules array
+ - Description trailing periods stripped per SCHEMA.md constraint ("no trailing period")
+ - `td *` and `bd *` bash rules in chiron preserved (custom tool aliases)
+ - No model field, no prompt field per schema exclusion rules
+
+## [2026-04-10] Task 6: loadAgents + agentsJson Bridge Complete
+- Fix applied: description = agent.description + "." (SCHEMA.md has no trailing period; golden file does)
+- All 6 agents load correctly via lib.loadAgents
+- agentsJson bridge matches golden file exactly (zero diff)
+- nix flake check: PASS
+- alejandra formatting: PASS
+- Commit: a81e178 feat: export loadAgents and backward-compat agentsJson from flake
diff --git a/.sisyphus/notepads/harness-agnostic-migration/problems.md b/.sisyphus/notepads/harness-agnostic-migration/problems.md
new file mode 100644
index 0000000..e69de29
diff --git a/.sisyphus/plans/harness-agnostic-migration.md b/.sisyphus/plans/harness-agnostic-migration.md
new file mode 100644
index 0000000..10f30ff
--- /dev/null
+++ b/.sisyphus/plans/harness-agnostic-migration.md
@@ -0,0 +1,1679 @@
+# Harness-Agnostic Agent Migration
+
+## TL;DR
+
+> **Quick Summary**: Migrate AGENTS repo from OpenCode-specific agents.json to a tool-agnostic canonical format (agent.toml + system-prompt.md per agent). Build Nix rendering pipeline in m3ta-nixpkgs that generates tool-specific configs for OpenCode, Claude Code, and Pi. Support system-level (home-manager) and project-level (flake.nix + direnv).
+>
+> **Deliverables**:
+> - 6 canonical agent definitions in AGENTS repo (TOML + Markdown)
+> - 3 tool renderers in m3ta-nixpkgs (OpenCode, Claude Code, Pi)
+> - Home-manager modules per tool replacing current opencode.nix
+> - Project-level lib functions for flake.nix + direnv usage
+> - Backward-compatible bridge during migration
+>
+> **Estimated Effort**: Large
+> **Parallel Execution**: YES — 4 waves
+> **Critical Path**: TOML spike → canonical agents → lib/agents.nix → per-tool HM modules → golden file verification
+
+---
+
+## Context
+
+### Original Request
+Restructure AGENTS repo to be harness-agnostic so the same agent definitions, skills, prompts work across OpenCode, Claude Code, Codex, Pi, and future coding agents. Build corresponding Nix infrastructure in m3ta-nixpkgs for system-level and project-level consumption.
+
+### Interview Summary
+**Key Discussions**:
+- YAML rejected for canonical format — TOML chosen (native `builtins.fromTOML`, no IFD)
+- Renderers belong in m3ta-nixpkgs, not AGENTS repo (AGENTS stays pure data)
+- OpenCode + Claude Code + Pi renderers now; Codex/Aider later on demand
+- Two-level permission model: simple intent + optional rules array for glob patterns
+- mkOpencodeRules renamed to mkCodingRules (backward-compat alias)
+- All 6 agents migrated to canonical TOML format
+- opencode.nix replaced by new per-tool modules (but non-agent OpenCode config kept separate)
+- Verification: nix flake check + rendered OpenCode output must match golden file
+- Project-level: lib functions returning derivations, usable via shellHook in devShells
+
+**Research Findings**:
+- **OpenCode**: Now supports file-based agents (`.opencode/agent/*.md` with YAML frontmatter) — modern path, avoids config.json embedding
+- **Claude Code**: Subagents require `name` (kebab-case) + `description` as mandatory frontmatter fields
+- **Pi**: No subagent concept. Uses AGENTS.md/CLAUDE.md for instructions, SYSTEM.md for prompt override, same SKILL.md format as OpenCode
+- **Codex**: config.toml + AGENTS.md only, no agent definitions — single-agent tool
+- **Aider**: .aider.conf.yml + read: lists, no agents/permissions/skills
+- **TOML in Nix**: `builtins.fromTOML` supports TOML 1.0.0 strict. No datetime fields, no multi-line inline tables.
+
+### Metis Review
+**Identified Gaps** (addressed):
+- **oh-my-opencode.json ownership**: Non-agent OpenCode config stays in slimmed opencode.nix (not in agents.nix)
+- **Pi has no subagents**: Pi renderer produces AGENTS.md + SYSTEM.md from primary agent only. Subagents skipped.
+- **Claude Code name format**: Renderer must slugify to `[a-z0-9-]+`
+- **Model string formats differ**: Need mapping table (canonical → tool-specific)
+- **OpenCode file-based agents**: Modern path via `.opencode/agent/*.md` preferred over config.json embedding
+- **TOML feasibility risk**: Must test Chiron-Forge's 15+ bash glob patterns FIRST
+- **Phase boundary enforced**: agents only (no skills/rules/MCP migration in this plan)
+- **Backward-compat bridge**: `lib.agentsJson` in AGENTS repo produces old JSON during transition
+- **Partial migration hazard**: Version check in nixpkgs to handle old AGENTS input gracefully
+
+---
+
+## Work Objectives
+
+### Core Objective
+Transform the AGENTS repository into a tool-agnostic data repository and build a Nix rendering pipeline that generates tool-specific configurations for multiple coding agents.
+
+### Concrete Deliverables
+- `AGENTS/agents/{chiron,chiron-forge,hermes,athena,apollo,calliope}/agent.toml` — 6 canonical agent definitions
+- `AGENTS/agents/{name}/system-prompt.md` — 6 system prompts (byte-identical to current .txt files)
+- `AGENTS/flake.nix` — Updated with `lib.loadAgents` and backward-compat `lib.agentsJson`
+- `nixpkgs/lib/agents.nix` — `loadCanonical` + 3 renderer functions
+- `nixpkgs/modules/home-manager/coding/agents/opencode.nix` — OpenCode HM sub-module
+- `nixpkgs/modules/home-manager/coding/agents/claude-code.nix` — Claude Code HM sub-module
+- `nixpkgs/modules/home-manager/coding/agents/pi.nix` — Pi HM sub-module
+- `nixpkgs/modules/home-manager/coding/opencode.nix` — Slimmed (non-agent config only)
+- `nixpkgs/lib/coding-rules.nix` — Renamed from opencode-rules.nix with backward-compat alias
+
+### Definition of Done
+- [ ] `nix flake check` passes on both repos
+- [ ] Rendered OpenCode agent output is semantically equivalent to current agents.json (golden file diff = 0)
+- [ ] All 6 agents parse successfully via `builtins.fromTOML`
+- [ ] Claude Code renderer produces valid MD files with required frontmatter
+- [ ] Pi renderer produces valid AGENTS.md + optional settings.json
+- [ ] System prompt content is byte-identical to current .txt files
+- [ ] `nix fmt` (alejandra) produces no changes
+- [ ] `lib.mkOpencodeSkills` still works unchanged
+
+### Must Have
+- All 6 agents in canonical TOML format with system-prompt.md
+- OpenCode renderer producing `.opencode/agent/*.md` file-based agents
+- Claude Code renderer producing `.claude/agents/*.md` with valid YAML frontmatter
+- Pi renderer producing AGENTS.md + SYSTEM.md from primary agent
+- Per-machine model overrides via home-manager
+- Backward-compatible bridge (`lib.agentsJson`) during transition
+- Project-level `renderForTool` lib function for flake.nix + direnv
+
+### Must NOT Have (Guardrails)
+- No YAML files as canonical source (TOML only — no IFD)
+- No renderer code in AGENTS repo (renderers live in nixpkgs)
+- No Codex or Aider renderers (design for extensibility, implement only 3)
+- No MCP configuration in agent.toml (MCP is tool-specific infrastructure)
+- No prompt content changes during migration (byte-identical rename only)
+- No skills/rules/context migration in this plan (separate concern)
+- No `mkOpencodeSkills` changes (stays as-is)
+- No datetime fields in TOML schema (requires experimental Nix flag)
+- No multi-line inline tables in TOML (not supported by Nix's TOML 1.0.0)
+- No generic permission translation DSL (each renderer hard-codes its own mapping)
+- No monolithic home-manager module (separate sub-module per tool)
+
+---
+
+## Verification Strategy (MANDATORY)
+
+> **ZERO HUMAN INTERVENTION** — ALL verification is agent-executed. No exceptions.
+
+### Test Decision
+- **Infrastructure exists**: YES — Nix evaluation + alejandra formatter
+- **Automated tests**: Nix eval comparison (golden file diff)
+- **Framework**: `nix eval`, `jq --sort-keys`, `diff`, `python3` for YAML validation
+
+### QA Policy
+Every task MUST include agent-executed QA scenarios.
+Evidence saved to `.sisyphus/evidence/task-{N}-{scenario-slug}.{ext}`.
+
+- **Nix evaluation**: Use Bash (nix eval) — Evaluate expressions, compare outputs
+- **File validation**: Use Bash (python3/jq) — Parse YAML frontmatter, validate JSON
+- **Content comparison**: Use Bash (diff) — Byte-identical prompt verification
+- **Formatting**: Use Bash (alejandra --check) — No formatting drift
+
+---
+
+## Execution Strategy
+
+### Parallel Execution Waves
+
+```
+Wave 1 (Start Immediately — foundation + spike):
+├── Task 1: Capture golden file baseline [quick]
+├── Task 2: TOML feasibility spike with Chiron-Forge [quick]
+├── Task 3: Design canonical agent.toml schema [deep]
+└── Task 4: Research OpenCode file-based agent frontmatter [quick]
+
+Wave 2 (After Wave 1 — AGENTS repo migration):
+├── Task 5: Create all 6 agent.toml + system-prompt.md files [unspecified-high]
+├── Task 6: Update AGENTS flake.nix with loadAgents + agentsJson [deep]
+├── Task 7: Create lib/agents.nix in nixpkgs with loadCanonical [deep]
+└── Task 8: Verify backward-compat bridge produces golden file match [quick]
+
+Wave 3 (After Wave 2 — renderers + HM modules, MAX PARALLEL):
+├── Task 9: Implement OpenCode renderer in lib/agents.nix [deep]
+├── Task 10: Implement Claude Code renderer in lib/agents.nix [deep]
+├── Task 11: Implement Pi renderer in lib/agents.nix [unspecified-high]
+├── Task 12: Create HM sub-module for OpenCode (agents/opencode.nix) [unspecified-high]
+├── Task 13: Create HM sub-module for Claude Code (agents/claude-code.nix) [unspecified-high]
+├── Task 14: Create HM sub-module for Pi (agents/pi.nix) [unspecified-high]
+├── Task 15: Slim down existing opencode.nix to non-agent config only [quick]
+└── Task 16: Rename mkOpencodeRules to mkCodingRules + backward-compat alias [quick]
+
+Wave 4 (After Wave 3 — integration + project-level + cleanup):
+├── Task 17: Add project-level renderForTool lib function [deep]
+├── Task 18: Update nixpkgs flake.nix exports + aggregator imports [quick]
+├── Task 19: Update AGENTS.md documentation [quick]
+├── Task 20: Remove legacy agents.json + prompts/*.txt from AGENTS repo [quick]
+└── Task 21: End-to-end integration test across both repos [unspecified-high]
+
+Wave FINAL (After ALL tasks — 4 parallel reviews, then user okay):
+├── Task F1: Plan compliance audit (oracle)
+├── Task F2: Code quality review (unspecified-high)
+├── Task F3: Real manual QA (unspecified-high)
+└── Task F4: Scope fidelity check (deep)
+-> Present results -> Get explicit user okay
+```
+
+### Dependency Matrix
+
+| Task | Depends On | Blocks | Wave |
+|------|-----------|--------|------|
+| 1 | — | 8, 21 | 1 |
+| 2 | — | 3, 5 | 1 |
+| 3 | 2 | 5, 6, 7 | 1 |
+| 4 | — | 9, 12 | 1 |
+| 5 | 2, 3 | 6, 8, 9, 10, 11 | 2 |
+| 6 | 3, 5 | 8, 17 | 2 |
+| 7 | 3 | 9, 10, 11, 12, 13, 14, 17 | 2 |
+| 8 | 1, 5, 6 | 20 | 2 |
+| 9 | 4, 5, 7 | 12 | 3 |
+| 10 | 5, 7 | 13 | 3 |
+| 11 | 5, 7 | 14 | 3 |
+| 12 | 9 | 18, 21 | 3 |
+| 13 | 10 | 18, 21 | 3 |
+| 14 | 11 | 18, 21 | 3 |
+| 15 | — | 18 | 3 |
+| 16 | — | 18 | 3 |
+| 17 | 6, 7 | 21 | 4 |
+| 18 | 12, 13, 14, 15, 16 | 21 | 4 |
+| 19 | 5 | — | 4 |
+| 20 | 8 | — | 4 |
+| 21 | 1, 12, 13, 14, 17, 18 | F1-F4 | 4 |
+
+### Agent Dispatch Summary
+
+- **Wave 1**: **4** — T1 → `quick`, T2 → `quick`, T3 → `deep`, T4 → `quick`
+- **Wave 2**: **4** — T5 → `unspecified-high`, T6 → `deep`, T7 → `deep`, T8 → `quick`
+- **Wave 3**: **8** — T9 → `deep`, T10 → `deep`, T11 → `unspecified-high`, T12-T14 → `unspecified-high`, T15-T16 → `quick`
+- **Wave 4**: **5** — T17 → `deep`, T18 → `quick`, T19 → `quick`, T20 → `quick`, T21 → `unspecified-high`
+- **FINAL**: **4** — F1 → `oracle`, F2 → `unspecified-high`, F3 → `unspecified-high`, F4 → `deep`
+
+---
+
+## TODOs
+
+- [x] 1. Capture Golden File Baseline
+
+ **What to do**:
+ - On the machine where home-manager is configured, capture the current rendered OpenCode agent config:
+ ```bash
+ nix eval --json '.#homeConfigurations.sk.config.programs.opencode.settings.agent' | jq --sort-keys . > /tmp/agents-golden.json
+ ```
+ - Also capture the raw agents.json for direct comparison:
+ ```bash
+ jq --sort-keys . /home/m3tam3re/p/AI/AGENTS/agents/agents.json > /tmp/agents-json-golden.json
+ ```
+ - Store both golden files in `.sisyphus/evidence/` for later use by other tasks
+ - Note: If home-manager eval isn't available, use direct file comparison as fallback
+
+ **Must NOT do**:
+ - Modify any source files
+ - Change agents.json content
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 1 (with Tasks 2, 3, 4)
+ - **Blocks**: Tasks 8, 21
+ - **Blocked By**: None
+
+ **References**:
+ - `agents/agents.json` — Current source of truth (173 lines, 6 agents)
+ - `modules/home-manager/coding/opencode.nix:148-149` in nixpkgs — Where agents.json gets embedded via `builtins.fromJSON (builtins.readFile ...)`
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Golden file captured and parseable
+ Tool: Bash
+ Preconditions: AGENTS repo at /home/m3tam3re/p/AI/AGENTS
+ Steps:
+ 1. Run: jq --sort-keys . /home/m3tam3re/p/AI/AGENTS/agents/agents.json > .sisyphus/evidence/agents-golden.json
+ 2. Run: jq 'keys | length' .sisyphus/evidence/agents-golden.json
+ 3. Assert output is exactly "6" (6 agents)
+ 4. Run: jq 'keys' .sisyphus/evidence/agents-golden.json
+ 5. Assert output contains "Chiron (Assistant)", "Chiron Forge (Builder)", "Hermes (Communication)", "Athena (Researcher)", "Apollo (Knowledge Management)", "Calliope (Writer)"
+ Expected Result: Golden file exists, contains 6 agents, valid JSON
+ Failure Indicators: jq parse error, agent count != 6, missing agent names
+ Evidence: .sisyphus/evidence/agents-golden.json
+ ```
+
+ **Commit**: NO (evidence only, no source changes)
+
+---
+
+- [x] 2. TOML Feasibility Spike — Chiron-Forge Permission Matrix
+
+ **What to do**:
+ - Write a test `agent.toml` for Chiron-Forge (the most complex agent — 2 primary modes, 15+ bash permission patterns with globs, wildcards, and special characters)
+ - Test it parses correctly with `nix eval --expr 'builtins.fromTOML (builtins.readFile ./test.toml)'`
+ - Specifically verify these tricky patterns parse in TOML strings:
+ - `"rm -rf *"` (glob with spaces)
+ - `"git reset --hard*"` (double-dash + glob)
+ - `"git push --force*"` (double-dash + glob)
+ - `"git push -f *"` (short flag + glob)
+ - `"~/p/**"` (home dir + double glob)
+ - `"/run/agenix/**"` (absolute path + double glob)
+ - Test the two-level permission schema: simple intent + rules array
+ - Write the test file to a temporary location, NOT in agents/ yet
+ - If parsing FAILS: document exactly what fails and propose schema workaround
+
+ **Must NOT do**:
+ - Create permanent files in agents/ directory (this is a spike)
+ - Modify flake.nix
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 1 (with Tasks 1, 3, 4)
+ - **Blocks**: Tasks 3, 5
+ - **Blocked By**: None
+
+ **References**:
+ - `agents/agents.json:40-68` — Chiron Forge's full permission matrix (the most complex agent)
+ - `agents/agents.json:1-38` — Chiron's permission matrix (read-only agent with extensive bash allowlist)
+ - Nix TOML docs: `builtins.fromTOML` supports TOML 1.0.0 strict (toml11 v4). No datetime, no multi-line inline tables.
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: TOML parses all permission patterns correctly
+ Tool: Bash
+ Preconditions: Nix available with builtins.fromTOML
+ Steps:
+ 1. Write test agent.toml with Chiron-Forge's full permission set to /tmp/test-agent.toml
+ 2. Run: nix eval --expr 'builtins.fromTOML (builtins.readFile /tmp/test-agent.toml)' --json
+ 3. Pipe output to jq and verify:
+ - .name == "chiron-forge"
+ - .permissions.bash.intent == "allow"
+ - .permissions.bash.rules | length >= 4 (deny rules)
+ - .permissions.edit.intent == "allow"
+ - .permissions.external_directory.rules | length >= 4
+ 4. Verify each deny pattern string is preserved exactly (no escaping issues)
+ Expected Result: All fields parse, all glob patterns preserved, all special characters intact
+ Failure Indicators: Nix eval error, missing fields, mangled glob patterns, escape issues
+ Evidence: .sisyphus/evidence/task-2-toml-spike.json
+
+ Scenario: TOML handles edge cases (empty rules, minimal agent)
+ Tool: Bash
+ Preconditions: Nix available
+ Steps:
+ 1. Write minimal agent.toml with only name + description to /tmp/test-minimal.toml
+ 2. Run: nix eval --expr 'builtins.fromTOML (builtins.readFile /tmp/test-minimal.toml)' --json
+ 3. Assert parse succeeds with only required fields
+ Expected Result: Minimal TOML parses without error
+ Failure Indicators: Nix eval error on missing optional fields
+ Evidence: .sisyphus/evidence/task-2-toml-minimal.json
+ ```
+
+ **Commit**: NO (spike only, temporary files)
+
+---
+
+- [x] 3. Design Canonical agent.toml Schema
+
+ **What to do**:
+ - Based on TOML spike results (Task 2) and research findings, define the final canonical schema
+ - Create `agents/SCHEMA.md` documenting the canonical format with:
+ - All required fields: `name` (string, kebab-case), `description` (string)
+ - All optional fields: `mode`, `tags`, `max_turns`
+ - Permission schema: `[permissions.TOOL]` tables with `intent` (allow/deny/ask) + `rules` (array of "pattern:action" strings)
+ - Skill references: `skills` array of skill names
+ - Context references: `context` array of file paths
+ - Rule references: `rules` array of rule paths (e.g. "languages/nix")
+ - NO `model` field (model is per-machine via home-manager)
+ - NO `prompt` field (prompt lives in system-prompt.md, not in TOML)
+ - NO MCP configuration (tool-specific)
+ - NO datetime fields (Nix limitation)
+ - Schema must be a SUPERSET: renderers silently drop fields they can't map
+ - Document per-renderer field support matrix in SCHEMA.md
+
+ **Must NOT do**:
+ - Include model configuration (per-machine concern)
+ - Include MCP server config (tool-specific infrastructure)
+ - Include hooks (Claude Code exclusive, not canonical)
+ - Use datetime TOML types
+ - Use multi-line inline tables
+
+ **Recommended Agent Profile**:
+ - **Category**: `deep`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (but should incorporate Task 2 results if available)
+ - **Parallel Group**: Wave 1 (with Tasks 1, 2, 4)
+ - **Blocks**: Tasks 5, 6, 7
+ - **Blocked By**: Task 2 (TOML feasibility must pass first)
+
+ **References**:
+ - Task 2's TOML spike results — Confirms which TOML patterns work
+ - `agents/agents.json` — Current field set to preserve (all 6 agents)
+ - Claude Code sub-agents docs — Required frontmatter fields: `name` (kebab-case), `description`
+ - OpenCode agent schema — Supports: name, description, mode, model, temperature, top_p, steps, permission[], color, hidden, disable
+ - Pi README — No agent schema. Uses AGENTS.md + SYSTEM.md + settings.json
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Schema document is complete and internally consistent
+ Tool: Bash
+ Preconditions: agents/SCHEMA.md created
+ Steps:
+ 1. Read agents/SCHEMA.md
+ 2. Verify it documents: required fields, optional fields, permission schema, skill references, context references
+ 3. Verify it explicitly lists: fields NOT included (model, prompt, mcp, hooks, datetime)
+ 4. Verify it includes a per-renderer support matrix table
+ 5. Write a sample agent.toml following the schema and parse it with builtins.fromTOML
+ Expected Result: Schema is complete, sample TOML parses successfully
+ Failure Indicators: Missing field documentation, sample TOML fails to parse
+ Evidence: .sisyphus/evidence/task-3-schema-sample.toml
+ ```
+
+ **Commit**: YES
+ - Message: `docs: add canonical agent.toml schema definition`
+ - Files: `agents/SCHEMA.md`
+ - Pre-commit: `nix eval --expr 'builtins.fromTOML (builtins.readFile ./evidence-sample.toml)' --json`
+
+---
+
+- [x] 4. Research OpenCode File-Based Agent Frontmatter
+
+ **What to do**:
+ - Verify OpenCode's `.opencode/agent/*.md` file-based agent format by reading source code or docs
+ - Document the exact YAML frontmatter fields supported:
+ - Which fields map from canonical agent.toml?
+ - What is the permission format in frontmatter?
+ - How does `mode: primary | subagent | all` work?
+ - What is the default behavior for omitted fields?
+ - How does file-based agent discovery interact with config.json `agent` key?
+ - Confirm that file-based agents DON'T require `home-manager switch` for prompt changes (key advantage over config.json embedding)
+ - Save findings to `.sisyphus/evidence/task-4-opencode-agent-format.md`
+
+ **Must NOT do**:
+ - Modify any files
+ - Create agent files yet (that's Task 5)
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES
+ - **Parallel Group**: Wave 1 (with Tasks 1, 2, 3)
+ - **Blocks**: Tasks 9, 12
+ - **Blocked By**: None
+
+ **References**:
+ - OpenCode GitHub: `github.com/sst/opencode` (or `anomalyco/opencode`) — search for agent loading logic
+ - OpenCode docs on file-based agents: `.opencode/agent/*.md` with YAML frontmatter
+ - Current opencode.nix:148 in nixpkgs — Shows config.json embedding approach (what we're moving away from)
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: OpenCode file-based agent format documented
+ Tool: Bash
+ Preconditions: Research complete
+ Steps:
+ 1. Read .sisyphus/evidence/task-4-opencode-agent-format.md
+ 2. Verify it documents: supported frontmatter fields, permission format, mode values, default behaviors
+ 3. Verify it confirms or denies: file-based agents don't need home-manager switch
+ Expected Result: Complete format documentation with concrete examples
+ Failure Indicators: Missing field documentation, unresolved questions about discovery behavior
+ Evidence: .sisyphus/evidence/task-4-opencode-agent-format.md
+ ```
+
+ **Commit**: NO (research only)
+
+- [x] 5. Create All 6 agent.toml + system-prompt.md Files
+
+ **What to do**:
+ - For each of the 6 agents, create `agents/{name}/agent.toml` following the schema from Task 3
+ - For each agent, copy (byte-identical!) the system prompt from `prompts/{name}.txt` to `agents/{name}/system-prompt.md`
+ - Agent directories to create: `chiron`, `chiron-forge`, `hermes`, `athena`, `apollo`, `calliope`
+ - Translate current JSON fields to TOML:
+ - `"Chiron (Assistant)"` → `name = "chiron"`, `display_name = "Chiron (Assistant)"`
+ - `"mode": "primary"` → `mode = "primary"`
+ - `"description": "..."` → `description = "..."`
+ - Permission objects → two-level `[permissions.TOOL]` tables
+ - Translate OpenCode's nested bash permission objects:
+ ```json
+ "bash": { "*": "ask", "git status*": "allow", ... }
+ ```
+ Into TOML:
+ ```toml
+ [permissions.bash]
+ intent = "ask"
+ rules = ["git status*:allow", "git log*:allow", ...]
+ ```
+ - Translate external_directory permissions similarly
+ - Skills, rules, context references per existing agent capabilities
+ - Verify EVERY toml file parses with `builtins.fromTOML`
+
+ **Must NOT do**:
+ - Change prompt content (byte-identical copy only)
+ - Include `model` field in agent.toml (per-machine concern)
+ - Delete old agents.json or prompts/ yet (Task 20)
+ - Add MCP config to agent.toml
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (depends on schema from Task 3)
+ - **Parallel Group**: Wave 2
+ - **Blocks**: Tasks 6, 8, 9, 10, 11, 19
+ - **Blocked By**: Tasks 2, 3
+
+ **References**:
+ - `agents/agents.json` — Source data for all 6 agents (173 lines)
+ - `prompts/chiron.txt` through `prompts/calliope.txt` — System prompts to copy
+ - `agents/SCHEMA.md` (from Task 3) — Canonical schema to follow
+ - Task 2 results — Confirmed TOML parsing patterns
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: All 6 agent.toml files parse without error
+ Tool: Bash
+ Preconditions: All agents/*/agent.toml created
+ Steps:
+ 1. for f in agents/*/agent.toml; do nix eval --impure --expr "builtins.fromTOML (builtins.readFile ./$f)" --json > /dev/null && echo "OK: $f" || echo "FAIL: $f"; done
+ 2. Assert all 6 print "OK"
+ 3. Verify each has required fields: nix eval --impure --expr '(builtins.fromTOML (builtins.readFile ./agents/chiron/agent.toml)).name' → "chiron"
+ Expected Result: All 6 TOML files parse, all have name + description
+ Failure Indicators: Any nix eval error, missing required fields
+ Evidence: .sisyphus/evidence/task-5-toml-parse-all.txt
+
+ Scenario: System prompts are byte-identical to originals
+ Tool: Bash
+ Preconditions: All agents/*/system-prompt.md created
+ Steps:
+ 1. diff prompts/chiron.txt agents/chiron/system-prompt.md
+ 2. diff prompts/chiron-forge.txt agents/chiron-forge/system-prompt.md
+ 3. diff prompts/hermes.txt agents/hermes/system-prompt.md
+ 4. diff prompts/athena.txt agents/athena/system-prompt.md
+ 5. diff prompts/apollo.txt agents/apollo/system-prompt.md
+ 6. diff prompts/calliope.txt agents/calliope/system-prompt.md
+ 7. All diffs must exit with code 0 (no differences)
+ Expected Result: Zero differences across all 6 files
+ Failure Indicators: Any diff showing changes
+ Evidence: .sisyphus/evidence/task-5-prompt-diff.txt
+
+ Scenario: Permission patterns preserved exactly
+ Tool: Bash
+ Preconditions: agents/chiron-forge/agent.toml exists
+ Steps:
+ 1. nix eval --impure --expr '(builtins.fromTOML (builtins.readFile ./agents/chiron-forge/agent.toml)).permissions.bash.rules' --json
+ 2. Assert result contains: "rm -rf *:ask", "git reset --hard*:ask", "git push --force*:deny", "git push -f *:deny"
+ 3. nix eval --impure --expr '(builtins.fromTOML (builtins.readFile ./agents/chiron/agent.toml)).permissions.bash.rules' --json
+ 4. Assert result contains at least 12 allow rules (git status*, git log*, etc.)
+ Expected Result: All glob patterns preserved with correct actions
+ Failure Indicators: Missing patterns, wrong action types, escaping issues
+ Evidence: .sisyphus/evidence/task-5-permissions.json
+ ```
+
+ **Commit**: YES
+ - Message: `feat: add canonical agent.toml definitions for all 6 agents`
+ - Files: `agents/*/agent.toml`, `agents/*/system-prompt.md`
+ - Pre-commit: `for f in agents/*/agent.toml; do nix eval --impure --expr "builtins.fromTOML (builtins.readFile ./$f)" --json > /dev/null; done`
+
+---
+
+- [x] 6. Update AGENTS flake.nix with loadAgents + agentsJson Bridge
+
+ **What to do**:
+ - Add `lib.loadAgents` function to AGENTS repo's flake.nix:
+ - Reads all `agents/*/agent.toml` via `builtins.fromTOML (builtins.readFile ...)`
+ - Reads corresponding `system-prompt.md` via `builtins.readFile`
+ - Returns an attrset: `{ chiron = { name; description; mode; permissions; ...; systemPrompt = "..."; }; ... }`
+ - Discovery: reads `agents/` directory, filters for subdirs containing `agent.toml`
+ - Add `lib.agentsJson` backward-compat bridge function:
+ - Calls `loadAgents`, transforms back to current agents.json shape
+ - Maps canonical permission format back to OpenCode's nested objects
+ - Maps `systemPrompt` back to `"prompt": "{file:./prompts/chiron.txt}"` format (or inline)
+ - Adds `model` field from a configurable default (since agent.toml has no model)
+ - Keep ALL existing exports unchanged: `lib.mkOpencodeSkills`, `packages.skills-runtime`, `devShells.default`
+ - `lib` export must be system-independent (no `forAllSystems` wrapper — pure functions)
+
+ **Must NOT do**:
+ - Change mkOpencodeSkills
+ - Remove any existing exports
+ - Add renderer logic (that goes in nixpkgs)
+ - Hardcode machine-specific model assignments
+
+ **Recommended Agent Profile**:
+ - **Category**: `deep`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Task 7)
+ - **Parallel Group**: Wave 2
+ - **Blocks**: Tasks 8, 17
+ - **Blocked By**: Tasks 3, 5
+
+ **References**:
+ - `flake.nix` — Current AGENTS flake (188 lines). Keep structure, add to `lib` section.
+ - `flake.nix:52-123` — `lib.mkOpencodeSkills` pattern (linkFarm approach)
+ - `agents/agents.json` — Target output shape for agentsJson bridge function
+ - `agents/SCHEMA.md` (from Task 3) — Canonical schema definition
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: loadAgents returns all 6 agents with correct structure
+ Tool: Bash
+ Preconditions: Task 5 complete, flake.nix updated
+ Steps:
+ 1. nix eval --json '.#lib.loadAgents' | jq 'keys | length'
+ 2. Assert output is 6
+ 3. nix eval --json '.#lib.loadAgents' | jq '.chiron.name'
+ 4. Assert output is "chiron"
+ 5. nix eval --json '.#lib.loadAgents' | jq '.["chiron-forge"].permissions.bash.intent'
+ 6. Assert output is "allow"
+ Expected Result: 6 agents loaded, all fields present
+ Failure Indicators: Eval error, wrong agent count, missing fields
+ Evidence: .sisyphus/evidence/task-6-loadagents.json
+
+ Scenario: agentsJson bridge matches golden file
+ Tool: Bash
+ Preconditions: Golden file from Task 1 available
+ Steps:
+ 1. nix eval --json '.#lib.agentsJson' | jq --sort-keys . > /tmp/agents-bridge-output.json
+ 2. diff /tmp/agents-bridge-output.json .sisyphus/evidence/agents-golden.json
+ 3. Exit code must be 0
+ Expected Result: Bridge output is semantically identical to original agents.json
+ Failure Indicators: Any diff output (jq --sort-keys already normalizes key ordering, so any remaining difference is a real content change)
+ Evidence: .sisyphus/evidence/task-6-bridge-diff.txt
+ ```
+
+ **Commit**: YES
+ - Message: `feat: export loadAgents and backward-compat agentsJson from flake`
+ - Files: `flake.nix`
+ - Pre-commit: `nix flake check && nix eval --json '.#lib.loadAgents' > /dev/null`
+
+---
+
+- [x] 7. Create lib/agents.nix in nixpkgs with loadCanonical
+
+ **What to do**:
+ - Create `/home/m3tam3re/p/NIX/nixpkgs/lib/agents.nix` with:
+ - `loadCanonical { agentsInput }` — reads canonical agents from the AGENTS flake input
+ - Calls `agentsInput.lib.loadAgents` (from Task 6) to get canonical attrset
+ - Returns the canonical attrset (or wraps/validates it)
+ - Stub functions for renderers (to be implemented in Tasks 9-11):
+ - `renderForOpencode { canonical; modelOverrides ? {}; }` → derivation placeholder
+ - `renderForClaudeCode { canonical; modelOverrides ? {}; }` → derivation placeholder
+ - `renderForPi { canonical; }` → derivation placeholder
+ - `renderForTool { agentsInput; tool; modelOverrides ? {}; }` → dispatcher
+ - Wire into `lib/default.nix` alongside existing `ports` and `opencode-rules`
+ - Follow existing lib patterns: `{lib}: { ... }` function signature
+
+ **Must NOT do**:
+ - Implement actual renderers yet (stubs only — Tasks 9-11)
+ - Import pkgs at lib level (renderers need pkgs, but the interface should accept it as argument)
+ - Modify existing lib functions
+
+ **Recommended Agent Profile**:
+ - **Category**: `deep`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Task 6)
+ - **Parallel Group**: Wave 2
+ - **Blocks**: Tasks 9, 10, 11, 12, 13, 14, 17
+ - **Blocked By**: Task 3
+
+ **References**:
+ - `lib/default.nix` in nixpkgs — Current lib exports (ports, opencode-rules). Add agents here.
+ - `lib/opencode-rules.nix` in nixpkgs — Pattern reference for lib function structure (116 lines)
+ - Task 3 SCHEMA.md — Defines the canonical attrset shape
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: lib/agents.nix loads and stub functions exist
+ Tool: Bash
+ Preconditions: lib/agents.nix created, lib/default.nix updated
+ Steps:
+ 1. nix eval --impure --expr 'builtins.attrNames (import ./lib { lib = (import <nixpkgs> {}).lib; }).agents' --json
+ 2. Assert result contains keys: loadCanonical, renderForOpencode, renderForClaudeCode, renderForPi, renderForTool
+ 3. nix flake check /home/m3tam3re/p/NIX/nixpkgs
+ 4. Assert exit code 0
+ Expected Result: All stub functions exist, flake check passes
+ Failure Indicators: Import error, missing function names, flake check failure
+ Evidence: .sisyphus/evidence/task-7-lib-agents.json
+ ```
+
+ **Commit**: YES
+ - Message: `feat(lib): add agents.nix with loadCanonical and renderer stubs`
+ - Files: `lib/agents.nix`, `lib/default.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [x] 8. Verify Backward-Compat Bridge Produces Golden File Match
+
+ **What to do**:
+ - With Tasks 1, 5, 6 complete, run the full backward-compat verification:
+ ```bash
+ nix eval --json '.#lib.agentsJson' | jq --sort-keys . > /tmp/bridge-output.json
+ diff /tmp/bridge-output.json .sisyphus/evidence/agents-golden.json
+ ```
+ - If diff shows differences, debug and fix in Task 6's `agentsJson` function
+ - This is a GATE: do NOT proceed to Wave 3 until this passes
+ - Document any semantic differences that are acceptable (e.g., key ordering)
+
+ **Must NOT do**:
+ - Modify the golden file
+ - Accept content differences as "close enough"
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (gate task)
+ - **Parallel Group**: Wave 2 (runs last in wave)
+ - **Blocks**: Task 20
+ - **Blocked By**: Tasks 1, 5, 6
+
+ **References**:
+ - `.sisyphus/evidence/agents-golden.json` — Golden file from Task 1
+ - Task 6's `lib.agentsJson` — Bridge function to verify
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Bridge output matches golden file exactly
+ Tool: Bash
+ Preconditions: Tasks 1, 5, 6 complete
+ Steps:
+ 1. cd /home/m3tam3re/p/AI/AGENTS
+ 2. nix eval --json '.#lib.agentsJson' | jq --sort-keys . > /tmp/bridge-verify.json
+ 3. diff /tmp/bridge-verify.json .sisyphus/evidence/agents-golden.json
+ 4. Assert exit code 0 (zero differences)
+ Expected Result: Byte-identical output after jq normalization
+ Failure Indicators: Any diff output, non-zero exit code
+ Evidence: .sisyphus/evidence/task-8-bridge-verify.txt
+ ```
+
+ **Commit**: NO (verification only)
+
+- [x] 9. Implement OpenCode Renderer in lib/agents.nix
+
+ **What to do**:
+ - Replace the `renderForOpencode` stub in `lib/agents.nix` with full implementation
+ - Renderer produces a derivation containing `.opencode/agent/*.md` files (file-based agents)
+ - For each agent in canonical attrset, generate a markdown file with YAML frontmatter:
+ - `name`: from canonical `name`
+ - `description`: from canonical `description`
+ - `mode`: from canonical `mode` (primary/subagent) — MUST set explicitly (OpenCode defaults to "all")
+ - `model`: from `modelOverrides.{name}` if present, otherwise omit (let OpenCode use its default)
+ - `permission`: translate canonical two-level permissions to OpenCode's rule array format:
+ ```yaml
+ permission:
+ - permission: bash
+ pattern: "git status*"
+ action: allow
+ - permission: bash
+ pattern: "*"
+ action: ask
+ ```
+ - `steps`: from canonical `max_turns` if present
+ - Body of markdown = content of `system-prompt.md`
+ - Use `pkgs.writeText` or `pkgs.runCommand` to generate each file, then `pkgs.linkFarm` or `pkgs.symlinkJoin` to combine
+ - Handle edge case: agent with no permission rules (omit permission key entirely)
+
+ **Must NOT do**:
+ - Embed agents in config.json (use file-based agent path)
+ - Include oh-my-opencode.json or plugin config (that stays in opencode.nix)
+ - Hard-code model values
+
+ **Recommended Agent Profile**:
+ - **Category**: `deep`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Tasks 10, 11, 15, 16)
+ - **Parallel Group**: Wave 3
+ - **Blocks**: Task 12
+ - **Blocked By**: Tasks 4, 5, 7
+
+ **References**:
+ - Task 4 evidence — OpenCode file-based agent frontmatter format
+ - `agents/agents.json` — Current permission format (for output comparison)
+ - `lib/agents.nix` stubs from Task 7 — Replace renderForOpencode
+ - OpenCode source: agent file discovery logic
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Rendered OpenCode agents have correct frontmatter
+ Tool: Bash
+ Preconditions: Renderer implemented, AGENTS repo with canonical agents
+ Steps:
+ 1. Build rendered output: nix build --impure --print-out-paths --expr 'let f = builtins.getFlake (toString ./.); in f.lib.x86_64-linux.agents.renderForOpencode { canonical = f.inputs.agents.lib.loadAgents; }'
+ 2. List files in rendered output: expect 6 .md files
+ 3. For chiron.md: extract YAML frontmatter, verify name="chiron", mode="primary", description present
+ 4. For chiron-forge.md: verify permission rules contain bash deny patterns for "rm -rf *", "git push --force*"
+ 5. For hermes.md: verify mode="subagent"
+ Expected Result: 6 agent files, correct frontmatter, correct permissions
+ Failure Indicators: Missing files, wrong mode, missing permissions
+ Evidence: .sisyphus/evidence/task-9-opencode-render.txt
+
+ Scenario: System prompts appear as markdown body
+ Tool: Bash
+ Preconditions: Rendered output available
+ Steps:
+ 1. Extract body (after frontmatter) from rendered chiron.md
+ 2. Compare with agents/chiron/system-prompt.md
+ 3. Assert byte-identical
+ Expected Result: Prompt content unchanged through rendering
+ Failure Indicators: Any content difference
+ Evidence: .sisyphus/evidence/task-9-prompt-body.txt
+ ```
+
+ **Commit**: YES (groups with N1)
+ - Message: `feat(lib): implement OpenCode renderer in agents.nix`
+ - Files: `lib/agents.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 10. Implement Claude Code Renderer in lib/agents.nix
+
+ **What to do**:
+ - Replace `renderForClaudeCode` stub with full implementation
+ - Renderer produces a derivation containing `.claude/agents/*.md` files AND `.claude/settings.json` fragment
+ - For each agent, generate markdown with YAML frontmatter:
+ - `name`: slugify canonical name to `[a-z0-9-]+` (e.g., "chiron-forge" stays, "Chiron" → "chiron")
+ - `description`: from canonical `description` (REQUIRED by Claude Code)
+ - `model`: from `modelOverrides.{name}` → map to Claude Code alias (sonnet/opus/haiku) or full ID
+ - `tools`: from canonical permissions — collect tool names where intent=allow into allowlist
+ - `disallowedTools`: from canonical permissions — collect tool names where intent=deny
+ - `permissionMode`: default to "default" unless canonical specifies
+ - `maxTurns`: from canonical `max_turns`
+ - `skills`: from canonical `skills` array
+ - Generate `.claude/settings.json` with permission rules translated to Claude Code DSL:
+ - Canonical `bash: { rules: ["git push*:deny"] }` → `permissions.deny: ["Bash(git push*)"]`
+ - Canonical `edit: { intent: "allow" }` → `permissions.allow: ["Edit"]`
+ - Body of markdown = content of `system-prompt.md`
+ - Handle subagent-only agents: Claude Code agents are always subagents (no "primary" mode)
+
+ **Must NOT do**:
+ - Include MCP server config in settings.json
+ - Include hooks (Claude Code exclusive concept, not in canonical)
+ - Use non-kebab-case names (Claude Code requires [a-z0-9-]+)
+
+ **Recommended Agent Profile**:
+ - **Category**: `deep`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Tasks 9, 11, 15, 16)
+ - **Parallel Group**: Wave 3
+ - **Blocks**: Task 13
+ - **Blocked By**: Tasks 5, 7
+
+ **References**:
+ - Claude Code sub-agents docs (indexed) — Frontmatter fields, tool names, permission syntax
+ - Claude Code settings docs (indexed) — Permission rule DSL: `"Bash(git diff *)"`, `"Read(./.env)"`
+ - `lib/agents.nix` stubs from Task 7
+ - Claude Code tool names: `Bash`, `Read`, `Edit`, `Write`, `Glob`, `Grep`, `WebFetch`, `Agent(type)`, `MCP(server::tool)`
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: All Claude Code agent files have valid YAML frontmatter
+ Tool: Bash
+ Preconditions: Renderer implemented
+ Steps:
+ 1. Build rendered output for claude-code
+ 2. For each .md file in .claude/agents/:
+ python3 -c "
+ import yaml, sys
+ content = open(sys.argv[1]).read()
+ parts = content.split('---', 2)
+ fm = yaml.safe_load(parts[1])
+ assert 'name' in fm, 'Missing name'
+ assert 'description' in fm, 'Missing description'
+ assert fm['name'].replace('-','').isalnum(), f'Invalid name: {fm[\"name\"]}'
+ print(f'OK: {fm[\"name\"]}')
+ " file.md
+ 3. Assert all 6 files pass
+ Expected Result: All frontmatter valid, names in kebab-case
+ Failure Indicators: YAML parse error, missing required fields, invalid name format
+ Evidence: .sisyphus/evidence/task-10-claude-frontmatter.txt
+
+ Scenario: Permission DSL correctly translated
+ Tool: Bash
+ Preconditions: Rendered .claude/settings.json exists
+ Steps:
+ 1. Read rendered .claude/settings.json
+ 2. Verify permissions.deny contains patterns like "Bash(rm -rf *)" for chiron-forge deny rules
+ 3. Verify permissions.allow contains patterns like "Bash(git status*)" for chiron allow rules
+ Expected Result: Permission rules correctly translated to Claude Code DSL
+ Failure Indicators: Missing rules, wrong DSL format, lost patterns
+ Evidence: .sisyphus/evidence/task-10-claude-permissions.json
+ ```
+
+ **Commit**: YES (groups with N1)
+ - Message: `feat(lib): implement Claude Code renderer in agents.nix`
+ - Files: `lib/agents.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 11. Implement Pi Renderer in lib/agents.nix
+
+ **What to do**:
+ - Replace `renderForPi` stub with full implementation
+ - Pi has NO subagent concept — renderer produces DIFFERENT outputs than OpenCode/Claude Code:
+ - `AGENTS.md` — Concatenation of all agent descriptions + primary agent's instructions
+ - `~/.pi/agent/SYSTEM.md` or `.pi/SYSTEM.md` — Primary agent's system prompt (replaces Pi's default prompt)
+ - `.pi/settings.json` fragment — Optional: tools list, model config
+ - Skill symlinks — Pi uses same SKILL.md dirs at `~/.pi/agent/skills/` or `.agents/skills/`
+ - Only PRIMARY agents render to SYSTEM.md. Subagent prompts get embedded as sections in AGENTS.md.
+ - Generate AGENTS.md with sections per agent:
+ ```markdown
+ # Agent Instructions
+
+ ## Chiron (Assistant)
+ Primary assistant for read-only analysis...
+
+ ## Available Specialists
+ - Hermes: Work communication (Basecamp, Outlook, Teams)
+ - Athena: Work knowledge (Outline wiki)
+ ...
+ ```
+ - Pi's tools config: `--tools read,bash,edit,write` maps from canonical permissions
+ - Handle: Pi has no permission granularity — only tool enable/disable
+
+ **Must NOT do**:
+ - Create agent files (Pi doesn't have them)
+ - Try to render subagents as separate entities
+ - Include MCP config (Pi uses extensions instead)
+ - Create TS extensions (out of scope)
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Tasks 9, 10, 15, 16)
+ - **Parallel Group**: Wave 3
+ - **Blocks**: Task 14
+ - **Blocked By**: Tasks 5, 7
+
+ **References**:
+ - Pi README (indexed) — AGENTS.md loaded at startup, SYSTEM.md replaces default prompt, APPEND_SYSTEM.md appends
+ - Pi README skills section — `~/.pi/agent/skills/`, `.pi/skills/`, `~/.agents/skills/`, `.agents/skills/`
+ - Pi README settings — `~/.pi/agent/settings.json` (global), `.pi/settings.json` (project)
+ - Pi README tools — `--tools read,bash,edit,write` (default built-in tools)
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Pi renderer produces valid AGENTS.md
+ Tool: Bash
+ Preconditions: Renderer implemented
+ Steps:
+ 1. Build rendered output for pi
+ 2. Assert AGENTS.md exists in output
+ 3. Assert AGENTS.md contains "Chiron" and agent descriptions
+ 4. Assert AGENTS.md contains specialist listing
+ 5. Verify AGENTS.md is valid Markdown (no TOML/JSON artifacts)
+ Expected Result: AGENTS.md exists with readable agent instructions
+ Failure Indicators: File missing, contains raw TOML/JSON, empty content
+ Evidence: .sisyphus/evidence/task-11-pi-agents-md.txt
+
+ Scenario: Pi renderer produces valid SYSTEM.md from primary agent
+ Tool: Bash
+ Preconditions: Rendered output available
+ Steps:
+ 1. Assert SYSTEM.md or .pi/SYSTEM.md exists in output
+ 2. Content should be the primary agent's system prompt
+ 3. Verify it matches agents/chiron/system-prompt.md content
+ Expected Result: SYSTEM.md exists with primary agent's prompt
+ Failure Indicators: Missing file, wrong content, subagent prompt instead
+ Evidence: .sisyphus/evidence/task-11-pi-system-md.txt
+ ```
+
+ **Commit**: YES (groups with N1)
+ - Message: `feat(lib): implement Pi renderer in agents.nix`
+ - Files: `lib/agents.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 12. Create HM Sub-Module for OpenCode (agents/opencode.nix)
+
+ **What to do**:
+ - Create `modules/home-manager/coding/agents/opencode.nix` in nixpkgs
+ - Options under `coding.agents.opencode`:
+ - `enable` — mkEnableOption
+ - `agentsInput` — flake input pointing to AGENTS repo
+ - `modelOverrides` — attrset mapping agent name → model string (e.g., `{ chiron = "anthropic/claude-sonnet-4"; }`)
+ - Config (mkIf enabled):
+ - Call `lib.agents.renderForOpencode { canonical; modelOverrides; }` to get rendered derivation
+ - Symlink rendered `.opencode/agent/` dir via `xdg.configFile` or `home.file`
+ - Symlink skills via existing `mkOpencodeSkills` (if agentsInput set)
+ - Symlink context/ and commands/ from AGENTS input
+ - Create `modules/home-manager/coding/agents/default.nix` aggregator importing opencode.nix, claude-code.nix, pi.nix
+ - Update `modules/home-manager/coding/default.nix` to import `./agents` subdir
+
+ **Must NOT do**:
+ - Handle oh-my-opencode.json, plugins, formatters (stays in slimmed opencode.nix)
+ - Embed agents in config.json (use file-based agents)
+ - Include MCP config
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (depends on Task 9)
+ - **Parallel Group**: Wave 3 (after Task 9)
+ - **Blocks**: Tasks 18, 21
+ - **Blocked By**: Task 9
+
+ **References**:
+ - `modules/home-manager/coding/opencode.nix` in nixpkgs — Current module to learn from (168 lines)
+ - Task 9's renderer — Produces the derivation this module consumes
+ - `modules/home-manager/AGENTS.md` in nixpkgs — Module conventions doc
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Module options type-check
+ Tool: Bash
+ Preconditions: Module created
+ Steps:
+ 1. nix flake check /home/m3tam3re/p/NIX/nixpkgs
+ 2. Assert exit code 0 (no type errors in module options)
+ Expected Result: Flake check passes
+ Failure Indicators: Type error in options, missing import
+ Evidence: .sisyphus/evidence/task-12-flake-check.txt
+ ```
+
+ **Commit**: YES (groups with N2)
+ - Message: `feat(hm): add OpenCode agent sub-module`
+ - Files: `modules/home-manager/coding/agents/opencode.nix`, `modules/home-manager/coding/agents/default.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 13. Create HM Sub-Module for Claude Code (agents/claude-code.nix)
+
+ **What to do**:
+ - Create `modules/home-manager/coding/agents/claude-code.nix` in nixpkgs
+ - Options under `coding.agents.claude-code`:
+ - `enable` — mkEnableOption
+ - `agentsInput` — flake input pointing to AGENTS repo
+ - `modelOverrides` — attrset mapping agent name → model alias or ID
+ - Config (mkIf enabled):
+ - Call `lib.agents.renderForClaudeCode { canonical; modelOverrides; }` to get rendered derivation
+ - Symlink rendered `.claude/agents/` via `home.file`
+ - Generate `.claude/settings.json` with rendered permission rules
+ - Symlink skills from AGENTS repo to `~/.claude/skills/` (if skills exist)
+ - Handle: Claude Code's `CLAUDE.md` — optionally generate from agent instructions
+
+ **Must NOT do**:
+ - Manage Claude Code's MCP config (.claude.json)
+ - Configure Claude Code API keys or auth
+ - Include hooks in settings.json
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (depends on Task 10)
+ - **Parallel Group**: Wave 3 (after Task 10)
+ - **Blocks**: Tasks 18, 21
+ - **Blocked By**: Task 10
+
+ **References**:
+ - Claude Code settings docs (indexed) — `.claude/` directory structure, settings.json scopes
+ - Task 10's renderer — Produces the derivation this module consumes
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Module type-checks and creates expected file paths
+ Tool: Bash
+ Preconditions: Module created
+ Steps:
+ 1. nix flake check /home/m3tam3re/p/NIX/nixpkgs
+ 2. Assert exit code 0
+ Expected Result: Flake check passes
+ Failure Indicators: Type error, missing import
+ Evidence: .sisyphus/evidence/task-13-flake-check.txt
+ ```
+
+ **Commit**: YES (groups with N2)
+ - Message: `feat(hm): add Claude Code agent sub-module`
+ - Files: `modules/home-manager/coding/agents/claude-code.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 14. Create HM Sub-Module for Pi (agents/pi.nix)
+
+ **What to do**:
+ - Create `modules/home-manager/coding/agents/pi.nix` in nixpkgs
+ - Options under `coding.agents.pi`:
+ - `enable` — mkEnableOption
+ - `agentsInput` — flake input pointing to AGENTS repo
+ - Config (mkIf enabled):
+ - Call `lib.agents.renderForPi { canonical; }` to get rendered derivation
+ - Place AGENTS.md at `~/.pi/agent/AGENTS.md` via `home.file`
+ - Place SYSTEM.md at `~/.pi/agent/SYSTEM.md` via `home.file`
+ - Symlink skills from AGENTS repo to `~/.pi/agent/skills/`
+ - Optionally symlink prompts from AGENTS repo to `~/.pi/agent/prompts/` (Pi's prompt templates)
+
+ **Must NOT do**:
+ - Create fake agent files (Pi has no subagents)
+ - Configure Pi extensions (TypeScript, out of scope)
+ - Manage Pi's package system
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (depends on Task 11)
+ - **Parallel Group**: Wave 3 (after Task 11)
+ - **Blocks**: Tasks 18, 21
+ - **Blocked By**: Task 11
+
+ **References**:
+ - Pi README (indexed) — `~/.pi/agent/` directory structure, skill paths, prompt template paths
+ - Task 11's renderer — Produces the derivation this module consumes
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Module type-checks
+ Tool: Bash
+ Preconditions: Module created
+ Steps:
+ 1. nix flake check /home/m3tam3re/p/NIX/nixpkgs
+ 2. Assert exit code 0
+ Expected Result: Flake check passes
+ Failure Indicators: Type error, missing import
+ Evidence: .sisyphus/evidence/task-14-flake-check.txt
+ ```
+
+ **Commit**: YES (groups with N2)
+ - Message: `feat(hm): add Pi agent sub-module`
+ - Files: `modules/home-manager/coding/agents/pi.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 15. Slim Down opencode.nix to Non-Agent Config Only
+
+ **What to do**:
+ - Edit existing `modules/home-manager/coding/opencode.nix` in nixpkgs
+ - REMOVE: `agentsInput` option (moved to agents/opencode.nix)
+ - REMOVE: `externalSkills` option (moved to agents/opencode.nix)
+ - REMOVE: Skills linkFarm generation (moved to agents/opencode.nix)
+ - REMOVE: Context/commands/prompts symlinks (moved to agents/opencode.nix)
+ - REMOVE: agents.json embedding (`builtins.fromJSON (builtins.readFile ...)`) (replaced by file-based agents)
+ - KEEP: `programs.opencode.enable` + `enableMcpIntegration`
+ - KEEP: `programs.opencode.settings` with theme, formatter, plugin array
+ - KEEP: `ohMyOpencodeSettings` → `oh-my-opencode.json` generation
+ - KEEP: `extraSettings` for provider/machine-specific config
+ - KEEP: `extraPlugins`
+ - Result: opencode.nix handles ONLY tool-specific, non-agent config
+
+ **Must NOT do**:
+ - Delete opencode.nix entirely (it still handles non-agent concerns)
+ - Move oh-my-opencode.json to agents module
+ - Change option names that other configs depend on (check consumers first)
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Tasks 9-14, 16)
+ - **Parallel Group**: Wave 3
+ - **Blocks**: Task 18
+ - **Blocked By**: None (but coordinate with Task 12 for import order)
+
+ **References**:
+ - `modules/home-manager/coding/opencode.nix` in nixpkgs — Current module (168 lines, will shrink to ~70)
+ - Task 12 — New agents/opencode.nix takes over agent concerns
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Slimmed opencode.nix still manages non-agent config
+ Tool: Bash
+ Preconditions: opencode.nix edited
+ Steps:
+ 1. Verify opencode.nix still has: ohMyOpencodeSettings, extraSettings, extraPlugins options
+ 2. Verify opencode.nix does NOT have: agentsInput, externalSkills options
+ 3. nix flake check /home/m3tam3re/p/NIX/nixpkgs
+ 4. Assert exit code 0
+ Expected Result: Module compiles, non-agent options preserved, agent options removed
+ Failure Indicators: Missing options that should stay, leftover agent options
+ Evidence: .sisyphus/evidence/task-15-opencode-slim.txt
+ ```
+
+ **Commit**: YES (groups with N3)
+ - Message: `refactor(hm): slim opencode.nix to non-agent config only`
+ - Files: `modules/home-manager/coding/opencode.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 16. Rename mkOpencodeRules to mkCodingRules with Backward-Compat Alias
+
+ **What to do**:
+ - In `lib/opencode-rules.nix`: rename the main function to `mkCodingRules`
+ - Add backward-compat alias: `mkOpencodeRules = builtins.trace "m3ta-nixpkgs: mkOpencodeRules is deprecated, use mkCodingRules" mkCodingRules;`
+ - Rename file: `lib/opencode-rules.nix` → `lib/coding-rules.nix`
+ - Update `lib/default.nix`: expose both `coding-rules.mkCodingRules` and `opencode-rules.mkOpencodeRules` (alias)
+ - For now, the function body stays identical — tool-agnostic rule rendering is a future enhancement
+
+ **Must NOT do**:
+ - Change the function's behavior or output
+ - Remove the old name entirely (backward compat)
+ - Add multi-tool rendering logic yet
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Tasks 9-15)
+ - **Parallel Group**: Wave 3
+ - **Blocks**: Task 18
+ - **Blocked By**: None
+
+ **References**:
+ - `lib/opencode-rules.nix` in nixpkgs — Current file (116 lines)
+ - `lib/default.nix` in nixpkgs — Current exports
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Both old and new names work
+ Tool: Bash
+ Preconditions: Rename complete
+ Steps:
+    1. nix eval --impure --expr '(import ./lib { lib = (import <nixpkgs> {}).lib; }).coding-rules.mkCodingRules' --json
+ 2. Assert function exists (no error)
+    3. nix eval --impure --expr '(import ./lib { lib = (import <nixpkgs> {}).lib; }).opencode-rules.mkOpencodeRules' --json
+ 4. Assert function exists (backward compat, may show deprecation trace)
+ Expected Result: Both names resolve to the same function
+ Failure Indicators: Either name fails, function behavior changed
+ Evidence: .sisyphus/evidence/task-16-rules-rename.txt
+ ```
+
+ **Commit**: YES (groups with N5)
+ - Message: `refactor(lib): rename mkOpencodeRules to mkCodingRules with compat alias`
+ - Files: `lib/coding-rules.nix`, `lib/default.nix`
+ - Pre-commit: `nix flake check`
+
+- [ ] 17. Add Project-Level renderForTool Lib Function
+
+ **What to do**:
+ - Implement `renderForTool` dispatcher in `lib/agents.nix`:
+ ```nix
+ renderForTool = { pkgs, agentsInput, tool, modelOverrides ? {} }:
+ let
+ canonical = agentsInput.lib.loadAgents;
+ renderers = {
+ opencode = renderForOpencode { inherit pkgs canonical modelOverrides; };
+ claude-code = renderForClaudeCode { inherit pkgs canonical modelOverrides; };
+ pi = renderForPi { inherit pkgs canonical; };
+ };
+ in renderers.${tool} or (throw "Unknown tool: ${tool}");
+ ```
+ - Add `shellHookForTool` helper that generates a shellHook placing rendered files in project dir:
+ - For OpenCode: symlinks `.opencode/agent/` → rendered derivation
+ - For Claude Code: symlinks `.claude/agents/` and `.claude/settings.json` → rendered
+ - For Pi: symlinks `.pi/SYSTEM.md` and `AGENTS.md` → rendered
+ - All shellHooks add appropriate entries to `.gitignore` if not already present
+ - Export via `lib.agents.renderForTool` and `lib.agents.shellHookForTool`
+ - Usage in project flake.nix:
+ ```nix
+ devShells.default = pkgs.mkShell {
+ shellHook = m3taLib.agents.shellHookForTool {
+ inherit pkgs;
+ agentsInput = inputs.agents;
+ tool = "opencode";
+ modelOverrides = { chiron = "anthropic/claude-sonnet-4"; };
+ };
+ };
+ ```
+
+ **Must NOT do**:
+ - Write files imperatively (use symlinks to Nix store paths)
+ - Assume tool from environment (require explicit `tool` argument)
+
+ **Recommended Agent Profile**:
+ - **Category**: `deep`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO
+ - **Parallel Group**: Wave 4
+ - **Blocks**: Task 21
+ - **Blocked By**: Tasks 6, 7 (and implicitly 9-11)
+
+ **References**:
+ - `lib/opencode-rules.nix:106-114` in nixpkgs — shellHook pattern reference (ln -sfn + cat > file)
+ - Task 9-11 renderers — Functions this dispatcher calls
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: renderForTool dispatches correctly
+ Tool: Bash
+ Preconditions: All renderers implemented
+ Steps:
+    1. nix eval --raw '.#lib.x86_64-linux.agents.renderForTool { pkgs = import <nixpkgs> {}; agentsInput = inputs.agents; tool = "opencode"; }'
+ 2. Assert output is a valid store path
+ 3. List contents — expect .opencode/agent/*.md files
+ 4. Repeat for tool = "claude-code" — expect .claude/agents/*.md
+ 5. Repeat for tool = "pi" — expect AGENTS.md + SYSTEM.md
+ Expected Result: Each tool produces correct output structure
+ Failure Indicators: Eval error, wrong output structure, unknown tool error
+ Evidence: .sisyphus/evidence/task-17-renderForTool.txt
+ ```
+
+ **Commit**: YES (groups with N4)
+ - Message: `feat(lib): add project-level renderForTool function`
+ - Files: `lib/agents.nix`
+ - Pre-commit: `nix flake check`
+
+---
+
+- [ ] 18. Update nixpkgs flake.nix Exports + Aggregator Imports
+
+ **What to do**:
+ - Update `modules/home-manager/coding/default.nix` to import `./agents` subdir:
+ ```nix
+ imports = [
+ ./editors.nix
+ ./opencode.nix
+ ./agents
+ ];
+ ```
+ - Update `flake.nix` homeManagerModules exports:
+ - Keep: `default`, `ports`, `zellij-ps`
+ - Keep: `opencode` (slimmed version)
+ - Add: `agents` pointing to `./modules/home-manager/coding/agents`
+ - Remove or deprecate old `opencode` if fully replaced
+ - Update `lib/default.nix` to export agents module:
+ ```nix
+ agents = import ./agents.nix { inherit lib; };
+ coding-rules = import ./coding-rules.nix { inherit lib; };
+ opencode-rules = import ./coding-rules.nix { inherit lib; }; # backward compat
+ ```
+ - Run `nix fmt` (alejandra) on all changed files
+ - Run `nix flake check`
+
+ **Must NOT do**:
+ - Remove `opencode` export without backward compat
+ - Change export names that external configs depend on
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (depends on all Wave 3 tasks)
+ - **Parallel Group**: Wave 4
+ - **Blocks**: Task 21
+ - **Blocked By**: Tasks 12, 13, 14, 15, 16
+
+ **References**:
+ - `flake.nix` in nixpkgs — Current exports (lines 78-83)
+ - `modules/home-manager/coding/default.nix` — Current aggregator
+ - `lib/default.nix` — Current lib exports
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: All exports resolve without error
+ Tool: Bash
+ Preconditions: All modules and lib functions created
+ Steps:
+ 1. nix flake check /home/m3tam3re/p/NIX/nixpkgs
+ 2. nix eval '.#homeManagerModules' --json | jq 'keys'
+ 3. Assert keys include: "default", "opencode", "agents", "ports", "zellij-ps"
+ 4. nix fmt --check /home/m3tam3re/p/NIX/nixpkgs
+ 5. Assert no formatting changes needed
+ Expected Result: All exports work, formatting clean
+ Failure Indicators: Missing export, eval error, formatting drift
+ Evidence: .sisyphus/evidence/task-18-exports.txt
+ ```
+
+ **Commit**: YES (groups with N6)
+ - Message: `chore: update flake exports and aggregator imports`
+ - Files: `flake.nix`, `modules/home-manager/coding/default.nix`, `lib/default.nix`
+ - Pre-commit: `nix flake check && nix fmt --check`
+
+---
+
+- [ ] 19. Update AGENTS.md Documentation
+
+ **What to do**:
+ - Update `/home/m3tam3re/p/AI/AGENTS/AGENTS.md` to reflect new directory structure
+ - Document:
+ - New `agents/{name}/agent.toml` + `system-prompt.md` structure
+ - The canonical TOML schema (reference SCHEMA.md)
+ - How renderers work (live in nixpkgs, not here)
+ - How to add a new agent
+ - How project-level usage works (flake.nix + direnv)
+ - Backward-compat bridge (`lib.agentsJson`) — note as temporary
+ - Update directory tree diagram
+ - Remove references to `agents/agents.json` as canonical source
+ - Keep references to skills/, rules/, context/, commands/ unchanged
+
+ **Must NOT do**:
+ - Create README.md (update existing AGENTS.md)
+ - Document nixpkgs internals (that's nixpkgs's AGENTS.md responsibility)
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Tasks 17, 18, 20)
+ - **Parallel Group**: Wave 4
+ - **Blocks**: None
+ - **Blocked By**: Task 5
+
+ **References**:
+ - `/home/m3tam3re/p/AI/AGENTS/AGENTS.md` — Current documentation
+ - Task 3's SCHEMA.md — Schema to reference
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: AGENTS.md reflects new structure
+ Tool: Bash
+ Preconditions: AGENTS.md updated
+ Steps:
+ 1. grep "agent.toml" AGENTS.md — assert found
+ 2. grep "system-prompt.md" AGENTS.md — assert found
+ 3. grep "agents.json" AGENTS.md — should NOT appear as "canonical" or "source of truth"
+ Expected Result: Documentation reflects canonical TOML format
+ Failure Indicators: References to old agents.json as primary, missing new structure docs
+ Evidence: .sisyphus/evidence/task-19-docs-check.txt
+ ```
+
+ **Commit**: YES (groups with A4)
+ - Message: `docs: update AGENTS.md for canonical agent format`
+ - Files: `AGENTS.md`
+
+---
+
+- [ ] 20. Remove Legacy agents.json + prompts/*.txt from AGENTS Repo
+
+ **What to do**:
+ - ONLY after Task 8 confirms backward-compat bridge works
+ - ONLY after nixpkgs modules consume new canonical format (Tasks 12-14 complete)
+ - Delete `agents/agents.json`
+ - Delete `prompts/chiron.txt`, `prompts/chiron-forge.txt`, `prompts/hermes.txt`, `prompts/athena.txt`, `prompts/apollo.txt`, `prompts/calliope.txt`
+ - Delete `prompts/` directory if empty
+ - Remove `lib.agentsJson` backward-compat function from flake.nix
+ - Verify `nix flake check` still passes after removal
+
+ **Must NOT do**:
+ - Remove before nixpkgs consumers are updated
+ - Remove system-prompt.md files (those are the NEW canonical prompts)
+ - Remove skills/, rules/, context/, commands/
+
+ **Recommended Agent Profile**:
+ - **Category**: `quick`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: YES (with Tasks 17, 18, 19)
+ - **Parallel Group**: Wave 4
+ - **Blocks**: None
+ - **Blocked By**: Task 8 (golden file match confirmed)
+
+ **References**:
+ - `agents/agents.json` — File to delete
+ - `prompts/*.txt` — Files to delete
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Legacy files removed, canonical files remain
+ Tool: Bash
+ Preconditions: Tasks 8, 12-14 confirmed complete
+ Steps:
+ 1. test ! -f agents/agents.json — assert file does NOT exist
+ 2. test ! -d prompts/ — assert directory does NOT exist (or is empty)
+ 3. for d in agents/chiron agents/chiron-forge agents/hermes agents/athena agents/apollo agents/calliope; do
+ test -f "$d/agent.toml" && test -f "$d/system-prompt.md" && echo "OK: $d"
+ done
+ 4. Assert all 6 directories have both files
+ 5. nix flake check — assert passes
+ Expected Result: Legacy removed, canonical intact, flake passes
+ Failure Indicators: Legacy files still present, canonical files missing, flake error
+ Evidence: .sisyphus/evidence/task-20-cleanup.txt
+ ```
+
+ **Commit**: YES (groups with A3)
+ - Message: `chore: remove legacy agents.json and prompts/*.txt`
+ - Files: `agents/agents.json` (deleted), `prompts/*.txt` (deleted), `flake.nix` (remove agentsJson)
+
+---
+
+- [ ] 21. End-to-End Integration Test Across Both Repos
+
+ **What to do**:
+ - Full cross-repo integration verification:
+ 1. In AGENTS repo: `nix eval --json '.#lib.loadAgents'` → verify 6 agents with all fields
+ 2. In nixpkgs: simulate home-manager eval with agents input:
+ - OpenCode: rendered agents dir contains 6 .md files with correct frontmatter
+ - Claude Code: rendered .claude/agents/ contains 6 .md files with valid YAML + required fields
+ - Pi: rendered output contains AGENTS.md + SYSTEM.md
+ 3. Project-level: test `renderForTool` for each tool
+ 4. Skills: verify `mkOpencodeSkills` still produces correct output
+ 5. Formatting: `nix fmt --check` on both repos
+ 6. Flake checks: `nix flake check` on both repos
+ - Document all results in evidence files
+
+ **Must NOT do**:
+ - Modify any source files (verification only)
+ - Skip any tool's verification
+
+ **Recommended Agent Profile**:
+ - **Category**: `unspecified-high`
+ - **Skills**: []
+
+ **Parallelization**:
+ - **Can Run In Parallel**: NO (integration gate)
+ - **Parallel Group**: Wave 4 (runs last)
+ - **Blocks**: F1-F4
+ - **Blocked By**: Tasks 1, 12, 13, 14, 17, 18
+
+ **References**:
+ - All previous task evidence files
+ - Both repo flake.nix files
+
+ **Acceptance Criteria**:
+
+ **QA Scenarios (MANDATORY):**
+
+ ```
+ Scenario: Full pipeline works end-to-end
+ Tool: Bash
+ Preconditions: All tasks 1-20 complete
+ Steps:
+ 1. cd /home/m3tam3re/p/AI/AGENTS && nix flake check && echo "AGENTS: OK"
+ 2. cd /home/m3tam3re/p/NIX/nixpkgs && nix flake check && echo "nixpkgs: OK"
+ 3. nix eval --json '/home/m3tam3re/p/AI/AGENTS#lib.loadAgents' | jq 'keys | length' — assert 6
+ 4. Test each renderer produces output without error
+ 5. nix fmt --check /home/m3tam3re/p/AI/AGENTS — assert clean
+ 6. nix fmt --check /home/m3tam3re/p/NIX/nixpkgs — assert clean
+ Expected Result: Both repos pass all checks, all renderers produce output
+ Failure Indicators: Any flake check failure, renderer error, format drift
+ Evidence: .sisyphus/evidence/task-21-e2e.txt
+
+ Scenario: Skills composition unchanged
+ Tool: Bash
+ Preconditions: mkOpencodeSkills not modified
+ Steps:
+    1. nix eval --raw '/home/m3tam3re/p/AI/AGENTS#lib.mkOpencodeSkills { pkgs = import <nixpkgs> {}; customSkills = ./skills; }'
+ 2. List contents of output directory
+ 3. Assert contains all active skill directories
+ Expected Result: Skills output identical to before migration
+ Failure Indicators: Missing skills, broken linkFarm
+ Evidence: .sisyphus/evidence/task-21-skills.txt
+ ```
+
+ **Commit**: NO (verification only)
+
+---
+
+## Final Verification Wave (MANDATORY — after ALL implementation tasks)
+
+> 4 review agents run in PARALLEL. ALL must APPROVE. Present consolidated results to user and get explicit "okay" before completing.
+>
+> **Do NOT auto-proceed after verification. Wait for user's explicit approval before marking work complete.**
+
+- [ ] F1. **Plan Compliance Audit** — `oracle`
+ Read the plan end-to-end. For each "Must Have": verify implementation exists (read file, nix eval, diff). For each "Must NOT Have": search codebase for forbidden patterns — reject with file:line if found. Check evidence files exist in .sisyphus/evidence/. Compare deliverables against plan.
+ Output: `Must Have [N/N] | Must NOT Have [N/N] | Tasks [N/N] | VERDICT: APPROVE/REJECT`
+
+- [ ] F2. **Code Quality Review** — `unspecified-high`
+ Run `nix flake check` on both repos. Run `nix fmt --check` (alejandra). Review all .nix files for: unused variables, hardcoded paths, missing mkIf guards, type errors. Check TOML files parse without error. Verify no AI slop: no excessive comments, no placeholder values, no TODO markers in production code.
+ Output: `Flake Check [PASS/FAIL] | Format [PASS/FAIL] | Nix Quality [N clean/N issues] | TOML Parse [N/N] | VERDICT`
+
+- [ ] F3. **Real Manual QA** — `unspecified-high`
+ Execute EVERY QA scenario from EVERY task. Capture evidence. Test cross-task integration: AGENTS repo `lib.loadAgents` → nixpkgs `loadCanonical` → each renderer → home-manager module output. Test edge cases: agent with many permission rules, agent with minimal config, model override. Save to `.sisyphus/evidence/final-qa/`.
+ Output: `Scenarios [N/N pass] | Integration [N/N] | Edge Cases [N tested] | VERDICT`
+
+- [ ] F4. **Scope Fidelity Check** — `deep`
+ For each task: read "What to do", read actual changes. Verify 1:1 — everything in spec was built (no missing), nothing beyond spec was built (no creep). Check "Must NOT do" compliance. Detect: skills/rules changes (forbidden), MCP in agent.toml (forbidden), Codex/Aider renderers (forbidden), prompt content changes (forbidden). Flag unaccounted changes.
+ Output: `Tasks [N/N compliant] | Scope [CLEAN/N issues] | Forbidden Patterns [CLEAN/N found] | VERDICT`
+
+---
+
+## Commit Strategy
+
+### AGENTS Repo
+- **Commit A1**: `feat: add canonical agent.toml definitions for all 6 agents` — agents/*/agent.toml + system-prompt.md
+- **Commit A2**: `feat: export loadAgents and backward-compat agentsJson from flake` — flake.nix updates
+- **Commit A3** (after nixpkgs consuming): `chore: remove legacy agents.json and prompts/*.txt`
+- **Commit A4**: `docs: update AGENTS.md for canonical agent format`
+
+### m3ta-nixpkgs
+- **Commit N1**: `feat(lib): add agents.nix with loadCanonical and 3 tool renderers`
+- **Commit N2**: `feat(hm): add per-tool agent HM sub-modules (opencode, claude-code, pi)`
+- **Commit N3**: `refactor(hm): slim opencode.nix to non-agent config only`
+- **Commit N4**: `feat(lib): add project-level renderForTool function`
+- **Commit N5**: `refactor(lib): rename mkOpencodeRules to mkCodingRules with compat alias`
+- **Commit N6**: `chore: update flake exports and aggregator imports`
+
+---
+
+## Success Criteria
+
+### Verification Commands
+```bash
+# AGENTS repo: all TOML files parse
+for f in agents/*/agent.toml; do nix eval --impure --expr "builtins.fromTOML (builtins.readFile ./$f)" --json > /dev/null; done
+
+# AGENTS repo: backward compat bridge
+diff <(nix eval --json '.#lib.x86_64-linux.agentsJson' | jq --sort-keys .) /tmp/agents-golden.json
+
+# AGENTS repo: prompts unchanged
+for agent in chiron chiron-forge hermes athena apollo calliope; do diff "prompts/$agent.txt" "agents/$agent/system-prompt.md"; done
+
+# nixpkgs: flake check
+nix flake check /home/m3tam3re/p/NIX/nixpkgs
+
+# nixpkgs: formatting
+nix fmt --check /home/m3tam3re/p/NIX/nixpkgs
+
+# nixpkgs: OpenCode golden file match
+diff <(nix eval --json '.#homeConfigurations.sk.config.xdg.configFile."opencode/agents"' | jq --sort-keys .) /tmp/opencode-agents-golden.json
+```
+
+### Final Checklist
+- [ ] All 6 agents have both `agent.toml` and `system-prompt.md`
+- [ ] All "Must Have" items present and verified
+- [ ] All "Must NOT Have" items absent
+- [ ] `nix flake check` passes on both repos
+- [ ] `nix fmt` produces no changes on both repos
+- [ ] Golden file comparison passes (OpenCode output unchanged)
+- [ ] Claude Code frontmatter valid (name + description present, kebab-case)
+- [ ] Pi output valid (AGENTS.md exists, optional JSON valid)
+- [ ] `lib.mkOpencodeSkills` unchanged and functional
+- [ ] Prompt content byte-identical to originals
diff --git a/AGENTS.md b/AGENTS.md
index 296a9d9..fdc06ea 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -1,6 +1,6 @@
-# Opencode Skills Repository
+# Agent Skills Repository
-Configuration repository for Opencode Agent Skills, context files, and agent configurations. Deployed via Nix home-manager to `~/.config/opencode/`.
+Configuration repository for AI Agent Skills, canonical agent definitions, context files, and agent configurations. Deployed via Nix home-manager to `~/.config/opencode/` (or equivalent paths for other tools).
## Build / Lint / Test Commands
@@ -24,10 +24,8 @@ python3 skills/skill-creator/scripts/quick_validate.py skills/
# Scaffold a new skill
python3 skills/skill-creator/scripts/init_skill.py --path skills/
-# Enter dev shell (provides Python, jq, poppler, playwright)
-nix develop
-# or with direnv:
-direnv allow
+# Verify agent TOML parses
+for f in agents/*/agent.toml; do nix eval --impure --expr "builtins.fromTOML (builtins.readFile ./$f)" --json > /dev/null && echo "OK: $f"; done
```
**No automated CI.** All validation is manual via the scripts above.
@@ -42,17 +40,18 @@ direnv allow
│ ├── scripts/ # Executable code (optional)
│ ├── references/ # Domain docs (optional)
│ └── assets/ # Templates/files (optional)
-├── rules/ # AI coding rules consumed by mkOpencodeRules
-│ ├── languages/ # python.md, typescript.md, nix.md, shell.md
-│ ├── concerns/ # coding-style.md, naming.md, testing.md, git-workflow.md, etc.
-│ └── frameworks/ # Framework-specific rules (n8n.md)
-├── agents/ # agents.json — embedded into opencode config.json
-├── prompts/ # System prompts (chiron.txt, chiron-forge.txt, etc.)
-├── context/ # User profile (profile.md)
-├── commands/ # Custom command definitions (reflection.md)
-├── scripts/ # Repo utilities (test-skill.sh, validate-agents.sh)
-├── flake.nix # Nix flake: devShells, packages, lib.mkOpencodeSkills
-└── .envrc # direnv: activates nix develop automatically
+├── rules/ # AI coding rules (languages, concerns, frameworks)
+│ ├── languages/ # Python, TypeScript, Nix, Shell
+│ ├── concerns/ # Testing, naming, documentation, etc.
+│ └── frameworks/ # Framework-specific rules (n8n, etc.)
+├── agents/ # Canonical agent definitions (harness-agnostic)
+│ ├── SCHEMA.md # Canonical agent.toml schema definition
+│   └── <name>/
+│ ├── agent.toml # Agent metadata, permissions, references
+│ └── system-prompt.md # Agent system prompt (markdown)
+├── context/ # User profiles
+├── commands/ # Custom commands
+└── scripts/ # Repo utilities (test-skill.sh, validate-agents.sh)
```
## SKILL.md Structure (Required Format)
@@ -131,42 +130,49 @@ When and how to delegate to other skills.
## Naming Conventions
-| Context | Python | TypeScript | Nix | Shell |
-| -------------- | ------------ | ------------ | ------------ | --------------- |
-| Variables | `snake_case` | `camelCase` | `camelCase` | `UPPER_SNAKE` |
-| Functions | `snake_case` | `camelCase` | `camelCase` | `lower_case` |
-| Classes | `PascalCase` | `PascalCase` | — | — |
-| Constants | `UPPER_SNAKE`| `UPPER_SNAKE`| `camelCase` | `UPPER_SNAKE` |
-| Files | `snake_case` | `camelCase` | `hyphen-case`| `hyphen-case` |
-| Skill dirs | `hyphen-case`| — | — | — |
-| Markdown files | `UPPERCASE.md` or `sentence-case.md` | | | |
+| Context | Python | TypeScript | Nix | Shell |
+| -------------- | ------------------------------------ | ------------- | ------------- | ------------- |
+| Variables | `snake_case` | `camelCase` | `camelCase` | `UPPER_SNAKE` |
+| Functions | `snake_case` | `camelCase` | `camelCase` | `lower_case` |
+| Classes | `PascalCase` | `PascalCase` | — | — |
+| Constants | `UPPER_SNAKE` | `UPPER_SNAKE` | `camelCase` | `UPPER_SNAKE` |
+| Files | `snake_case` | `camelCase` | `hyphen-case` | `hyphen-case` |
+| Skill dirs | `hyphen-case` | — | — | — |
+| Markdown files | `UPPERCASE.md` or `sentence-case.md` | | | |
Function names: verb-noun pattern (`get_user_data`, `validate_skill`). Classes: descriptive nouns, no abbreviations.
## Anti-Patterns (CRITICAL — Never Do These)
**Skills:**
+
- NEVER place scripts or docs outside `scripts/` and `references/` subdirectories
- NEVER add `README.md` or `CHANGELOG.md` inside a skill directory
- NEVER create a skill without valid YAML frontmatter
**Frontend Design:**
+
- NEVER use generic AI aesthetics; NEVER converge on common design choices
**Excalidraw:**
+
- NEVER use the `label` property; use `boundElements` + separate text elements
**Debugging:**
+
- NEVER fix just the symptom; ALWAYS find and address the root cause first
**Excel / Spreadsheets:**
+
- ALWAYS respect existing template conventions over general guidelines
**Python:**
+
- NEVER use bare `except:` — always catch specific exception types
- NEVER use mutable default arguments
**Nix:**
+
- NEVER use `with pkgs;` — always use explicit `pkgs.packageName` references
## Testing Patterns
@@ -185,18 +191,21 @@ python3 skills/skill-creator/scripts/quick_validate.py skills/
```
**Test structure for Python scripts** (when writing `scripts/*.py`):
+
- Use `pytest` + `hypothesis` for property-based tests
- Arrange-Act-Assert pattern; one behavior per test
- Test public contracts and observable behavior, not internals
- Mock external I/O (network, filesystem); don't mock internal logic
**Known structural deviations** (do not replicate):
+
- `systematic-debugging/test-*.md` — pressure tests in wrong location
- `pdf/forms.md`, `pdf/reference.md` — docs outside `references/`
## Git Workflow
**Commit format**: `(): ` (Conventional Commits)
+
- Types: `feat`, `fix`, `refactor`, `docs`, `chore`, `test`, `style`
- Subject: imperative mood, ≤ 72 chars, no trailing period
- Example: `feat(skill-creator): add YAML frontmatter auto-repair`
@@ -205,13 +214,94 @@ python3 skills/skill-creator/scripts/quick_validate.py skills/
**Session completion workflow**: commit + `git push` (always push at end of session)
+## Canonical Agent Format
+
+Agent definitions live in `agents/<name>/agent.toml` + `agents/<name>/system-prompt.md`.
+This is a **harness-agnostic** format — renderers in m3ta-nixpkgs generate tool-specific configs.
+
+See `agents/SCHEMA.md` for the full schema definition.
+
+### Adding a new agent
+
+1. Create `agents/<name>/agent.toml` with required fields (`name`, `description`) and optional fields (`mode`, `permissions`, etc.)
+2. Create `agents/<name>/system-prompt.md` with the agent's system prompt
+3. Verify: `nix eval --impure --expr 'builtins.fromTOML (builtins.readFile ./agents/<name>/agent.toml)' --json`
+4. Add the agent to renderers by updating the consuming flake inputs
+
+### How renderers work
+
+Renderers live in **m3ta-nixpkgs** (not this repo). They consume `lib.loadAgents` and produce:
+
+| Tool | Output | Path |
+| ----------- | --------------------------------------- | ---------------------------- |
+| OpenCode | `.opencode/agents/*.md` | `~/.config/opencode/agents/` |
+| Claude Code | `.claude/agents/*.md` + `settings.json` | `~/.claude/` |
+| Pi | `AGENTS.md` + `SYSTEM.md` | `~/.pi/agent/` |
+
+### Project-level usage
+
+```nix
+# In project flake.nix
+m3taLib.agents.shellHookForTool {
+ inherit pkgs;
+ agentsInput = inputs.agents;
+ tool = "opencode";
+ modelOverrides = { chiron = "anthropic/claude-sonnet-4"; };
+};
+```
+
## Deployment
**Agent changes** (`agents/agents.json`, `prompts/*.txt`) require `home-manager switch`.
**All other changes** (skills, context, commands) are visible immediately via symlinks.
```nix
-# Minimal home-manager setup
+agents = {
+ url = "git+https://code.m3ta.dev/m3tam3re/AGENTS";
+ inputs.nixpkgs.follows = "nixpkgs"; # Optional but recommended
+};
+```
+
+**Exports:**
+
+- `lib.loadAgents` — loads all canonical `agents/*/agent.toml` + `system-prompt.md` into an attrset
+- `lib.mkOpencodeSkills` — compose custom + external [skills.sh](https://skills.sh) skills into one directory
+- `lib.agentsJson` — backward-compat bridge producing legacy agents.json shape (temporary, will be removed)
+- `packages.skills-runtime` — composable runtime with all skill dependencies
+- `devShells.default` — dev environment for working with skills
+
+**Mapping** (via home-manager + m3ta-nixpkgs renderers):
+
+- `agents/` → rendered per-tool via `lib.agents.renderForTool` in m3ta-nixpkgs
+- `skills/` → composed via `mkOpencodeSkills` (custom + external merged)
+- `context/`, `commands/` → symlinks
+- Agent changes via file-based agents: visible on next tool restart (no `home-manager switch` needed for prompt changes)
+
+### External Skills (skills.sh)
+
+This repo supports composing skills from external [skills.sh](https://skills.sh) repositories
+alongside custom skills. External repos follow the [Agent Skills](https://agentskills.io)
+standard (same `SKILL.md` format).
+
+**`lib.mkOpencodeSkills` parameters:**
+
+- `pkgs` (required) — nixpkgs package set
+- `customSkills` (optional) — path to custom skills directory (e.g., `"${inputs.agents}/skills"`)
+- `externalSkills` (optional) — list of external sources, each with:
+ - `src` — flake input or path to repo root
+ - `skillsDir` — subdirectory containing skills (default: `"skills"`)
+ - `selectSkills` — list of skill names to include (default: all)
+
+**Collision handling:** Custom skills always win. Among externals, earlier entries take priority.
+
+**Home-manager example:**
+
+```nix
+inputs = {
+ agents.url = "git+https://code.m3ta.dev/m3tam3re/AGENTS";
+ skills-anthropic = { url = "github:anthropics/skills"; flake = false; };
+};
+
xdg.configFile."opencode/skills".source =
inputs.agents.lib.mkOpencodeSkills {
pkgs = nixpkgs.legacyPackages.${system};
@@ -221,14 +311,138 @@ xdg.configFile."opencode/skills".source =
See `README.md` for full deployment examples including external skill composition.
-## Quality Gates (Before Committing)
+## Migration Guide (for the repo owner)
-1. `./scripts/test-skill.sh --validate` — all skills pass
-2. `./scripts/validate-agents.sh` — agent config is valid (if agents/ changed)
-3. Python scripts have `#!/usr/bin/env python3` shebang + Google-style docstrings
-4. No extraneous files (`README.md`, `CHANGELOG.md`) inside skill directories
-5. If skill scripts have new dependencies → update `flake.nix` `pythonEnv` or `paths`
-6. Git status clean before pushing
+This section documents how to complete the migration from the legacy `agents.json` + `prompts/*.txt` format to the canonical `agent.toml` + `system-prompt.md` format. The canonical files already exist; what remains is updating the consumer configs and removing legacy files.
+
+### Current state
+
+- ✅ All 6 agents exist in canonical format: `agents/{name}/agent.toml` + `agents/{name}/system-prompt.md`
+- ✅ `lib.loadAgents` loads canonical agents from TOML
+- ✅ `lib.agentsJson` backward-compat bridge produces the old JSON shape from TOML
+- ⏳ Legacy files still present: `agents/agents.json`, `prompts/*.txt`
+- ⏳ Consumer (home-manager) still reads `agents.json` directly via the old `coding.opencode` module
+
+### Step 1: Update home-manager config in your NixOS/HM flake
+
+Change from the old `coding.opencode` agent options to the new `coding.agents.opencode` module:
+
+```nix
+# BEFORE (legacy — agents embedded in config.json):
+coding.opencode = {
+ enable = true;
+ agentsInput = inputs.agents;
+ externalSkills = [ ... ];
+ ohMyOpencodeSettings = { ... };
+ extraSettings = { ... };
+};
+
+# AFTER (new — file-based agents from canonical TOML):
+coding.opencode = {
+ enable = true; # handles theme, plugins, formatter, oh-my-opencode
+ ohMyOpencodeSettings = { ... };
+ extraSettings = { ... };
+};
+
+coding.agents.opencode = {
+ enable = true;
+ agentsInput = inputs.agents;
+ externalSkills = [ ... ];
+ modelOverrides = {
+ chiron = "zai-coding-plan/glm-5";
+ "chiron-forge" = "zai-coding-plan/glm-5";
+ };
+};
+```
+
+Key changes:
+
+- `agentsInput` and `externalSkills` move from `coding.opencode` to `coding.agents.opencode`
+- `modelOverrides` is new — per-agent model selection (previously hardcoded in agents.json)
+- Skills, context, commands are now handled by the agents module
+- Agents are deployed as file-based `~/.config/opencode/agents/*.md` instead of embedded in config.json
+
+### Step 2: Run home-manager switch
+
+```bash
+home-manager switch --flake .
+```
+
+Verify that `~/.config/opencode/agents/` contains 6 `.md` files with the correct frontmatter.
+
+### Step 3: Remove legacy files from AGENTS repo
+
+After confirming everything works with the new setup:
+
+```bash
+cd /home/m3tam3re/p/AI/AGENTS
+
+# Remove legacy agent definition
+rm agents/agents.json
+
+# Remove legacy prompt files (now in agents/*/system-prompt.md)
+rm prompts/chiron.txt prompts/chiron-forge.txt prompts/hermes.txt \
+ prompts/athena.txt prompts/apollo.txt prompts/calliope.txt
+rmdir prompts/ # if empty
+
+# Remove backward-compat bridge from flake.nix
+# Delete the lib.agentsJson section from flake.nix
+```
+
+After removing `lib.agentsJson`, update flake.nix to remove the bridge function. The `lib.loadAgents` and `lib.mkOpencodeSkills` exports remain.
+
+### Step 4: Verify
+
+```bash
+# AGENTS repo: all TOML files parse
+cd /home/m3tam3re/p/AI/AGENTS
+for f in agents/*/agent.toml; do
+ nix eval --impure --expr "builtins.fromTOML (builtins.readFile ./$f)" --json > /dev/null && echo "OK: $f"
+done
+nix flake check
+
+# nixpkgs: flake check passes
+cd /home/m3tam3re/p/NIX/nixpkgs
+nix flake check
+
+# Home-manager: agents deployed correctly
+ls ~/.config/opencode/agents/
+```
+
+### Optional: Enable other tool renderers
+
+To also deploy agents for Claude Code or Pi, add to your home-manager config:
+
+```nix
+# Claude Code agents
+coding.agents.claude-code = {
+ enable = true;
+ agentsInput = inputs.agents;
+ modelOverrides = { };
+};
+
+# Pi agents
+coding.agents.pi = {
+ enable = true;
+ agentsInput = inputs.agents;
+};
+```
+
+## Rules System
+
+Centralized AI coding rules consumed via `mkCodingRules` from m3ta-nixpkgs
+(`mkOpencodeRules` still works as backward-compat alias):
+
+```nix
+# In project flake.nix
+m3taLib.coding-rules.mkCodingRules {
+ inherit agents;
+ languages = [ "python" "typescript" ];
+ frameworks = [ "n8n" ];
+};
+```
+
+See `rules/USAGE.md` for full documentation.
## Notes for AI Agents
diff --git a/agents/SCHEMA.md b/agents/SCHEMA.md
new file mode 100644
index 0000000..949b192
--- /dev/null
+++ b/agents/SCHEMA.md
@@ -0,0 +1,275 @@
+# Canonical `agent.toml` Schema
+
+This document defines the canonical TOML schema for agent definitions in the AGENTS
+repository. The format is **harness-agnostic**: every renderer (OpenCode, Claude Code,
+Pi) consumes the same file and silently drops fields it cannot map.
+
+The schema is intentionally minimal. Fields that belong to the deployment environment
+(model selection, MCP configuration) are excluded. Fields that belong to a specific
+renderer (hooks, datetime) are excluded. The TOML file describes *what the agent is
+and what it can do*, not *how it is deployed*.
+
+---
+
+## Required Fields
+
+| Field | Type | Constraints |
+|-------|------|-------------|
+| `name` | string | kebab-case slug; pattern `[a-z0-9-]+`; must be unique across all agents |
+| `description` | string | Human-readable purpose statement; single line; no trailing period |
+
+### Examples
+
+```toml
+name = "chiron"
+description = "Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance"
+```
+
+```toml
+name = "chiron-forge"
+description = "Personal AI assistant (Build Mode). Full execution and task completion capabilities with safety prompts"
+```
+
+**Constraint notes**:
+- `name` must be kebab-case (lowercase, hyphens only). The OpenCode renderer uses it as
+ the agent identifier; Claude Code requires `[a-z0-9-]+` and will reject spaces.
+- `description` is required by Claude Code. OpenCode uses it in the agent picker.
+
+---
+
+## Optional Fields
+
+| Field | Type | Default | Notes |
+|-------|------|---------|-------|
+| `display_name` | string | — | Human-readable label (e.g. `"Chiron (Assistant)"`). Used by OpenCode in the agent picker. Ignored by Claude Code. |
+| `mode` | string | `"all"` | One of: `"primary"`, `"subagent"`, `"all"` |
+| `tags` | array of strings | `[]` | Freeform labels for grouping/filtering |
+| `max_turns` | integer | — | Max agentic loop iterations. Maps to `steps` in OpenCode, `maxTurns` in Claude Code. Ignored by Pi. |
+| `skills` | array of strings | `[]` | Skill names to load (e.g. `["systematic-debugging", "git-master"]`). Same `SKILL.md` format across all renderers. |
+| `context` | array of strings | `[]` | Relative file paths to inject as context (e.g. `["../context/profile.md"]`) |
+| `rules` | array of strings | `[]` | Rule references from the `rules/` directory (e.g. `["languages/nix", "concerns/testing"]`) |
+
+### `mode` semantics
+
+| Value | Meaning |
+|-------|---------|
+| `"primary"` | Agent is offered as a top-level choice to the user |
+| `"subagent"` | Agent is invoked programmatically by other agents; not shown in primary picker |
+| `"all"` | Both primary and subagent (default if omitted) |
+
+**Renderer note**: Claude Code has no distinction between primary and subagent — all
+agents are effectively subagents. Pi only renders a `SYSTEM.md` for primary agents.
+
+---
+
+## Permission Schema
+
+Permissions describe what each tool is allowed to do. The schema uses two-level TOML
+tables: one section per tool.
+
+### Structure
+
+```toml
+[permissions.TOOL_NAME]
+intent = "allow" | "deny" | "ask" # required
+rules = ["pattern:action", ...] # optional
+```
+
+- **`intent`** — the default action taken when no specific rule matches.
+ - `"allow"` — permit the operation without prompting
+ - `"deny"` — block the operation silently
+ - `"ask"` — prompt the user for confirmation
+- **`rules`** — ordered list of override entries. Each entry is a string in the form
+ `"pattern:action"` where `action` is one of `allow`, `deny`, or `ask`.
+ Rules are evaluated first-match; the `intent` applies only when no rule matches.
+
+### Supported tool names
+
+| Tool | Description |
+|------|-------------|
+| `bash` | Shell command execution |
+| `edit` | File creation and modification |
+| `webfetch` | HTTP fetch to external URLs |
+| `websearch` | Web search queries |
+| `question` | Interactive user question prompts |
+| `external_directory` | Access to filesystem paths outside the project |
+
+### Simple permission (no per-pattern overrides)
+
+```toml
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.websearch]
+intent = "deny"
+
+[permissions.question]
+intent = "allow"
+```
+
+### Structured permission with rules
+
+```toml
+[permissions.bash]
+intent = "ask"
+rules = [
+ "git status*:allow",
+ "git log*:allow",
+ "git diff*:allow",
+ "git branch*:allow",
+ "rm -rf *:deny",
+ "git push --force*:deny",
+]
+
+[permissions.edit]
+intent = "allow"
+rules = [
+ "/run/agenix/**:deny",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
+```
+
+### Pattern syntax
+
+Patterns follow glob conventions:
+- `*` — matches any characters within a single path segment or command token
+- `**` — matches any characters including path separators
+- Rules are evaluated in list order, top to bottom; the first matching pattern wins
+
+---
+
+## Excluded Fields
+
+The following fields are intentionally absent from the canonical schema.
+
+| Field | Reason |
+|-------|--------|
+| `model` | Per-machine concern. Model selection is configured in `home-manager` (via `programs.opencode.settings`) and varies by host. Including it in `agent.toml` would couple the agent definition to deployment infrastructure. |
+| `prompt` | System prompt content lives in a sibling `system-prompt.md` file alongside `agent.toml`. This allows renderers to consume it independently (e.g., Claude Code reads `system-prompt.md`, Pi generates `SYSTEM.md` from it). Embedding prompt paths in TOML adds indirection without benefit. |
+| `mcp` | MCP server configuration is tool-specific infrastructure (e.g., `claude mcp add`). It belongs to the deployment layer, not the agent definition. |
+| `hooks` | Claude Code-exclusive concept. Lifecycle hooks (pre-tool, post-tool, etc.) have no equivalent in OpenCode or Pi. Including them would leak renderer-specific concerns into the canonical schema. |
+| Datetime types | `builtins.fromTOML` in Nix does not support TOML datetime values. This is a confirmed parser limitation (verified in Task 2 spike). All date/time data must be represented as strings if needed. |
+
+---
+
+## Per-Renderer Support Matrix
+
+The table below shows how each field is consumed by each renderer. "✓" means full
+support, "~" means partial/mapped support, "–" means ignored.
+
+| Field | OpenCode | Claude Code | Pi |
+|-------|----------|-------------|----|
+| `name` | ✓ agent identifier | ✓ must be `[a-z0-9-]+` | ✓ subagent frontmatter |
+| `description` | ✓ agent picker | ✓ required | ✓ subagent frontmatter |
+| `display_name` | ✓ picker label | – ignored | ✓ in AGENTS.md |
+| `mode` | ✓ maps to `mode` | – all are subagents | primary only → `SYSTEM.md` |
+| `tags` | ~ future use | – ignored | – ignored |
+| `max_turns` | ✓ maps to `steps` | ✓ maps to `maxTurns` | – ignored |
+| `skills` | ✓ SKILL.md loaded | ✓ SKILL.md loaded | ✓ subagent `skill` field |
+| `context` | ✓ injected | ✓ injected | ~ manual inclusion |
+| `rules` | ✓ rule injection | ✓ rule injection | – ignored |
+| `permissions.bash` | ✓ rule DSL | ✓ bash tool perms | ~ tool enable/disable |
+| `permissions.edit` | ✓ path rules | ✓ path rules | ~ tool enable/disable |
+| `permissions.webfetch` | ✓ intent only | ✓ intent only | ~ tool enable/disable |
+| `permissions.websearch` | ✓ intent only | ✓ intent only | ~ tool enable/disable |
+| `permissions.question` | ✓ intent only | – not a tool | – not a concept |
+| `permissions.external_directory` | ✓ path rules | – not supported | ~ tools list (allow/ask → include) |
+
+**Renderer summary**:
+- **OpenCode** — full support; most fields have direct mappings
+- **Claude Code** — strong support; drops `display_name`, `external_directory`, `mode`
+- **Pi** — subagent support via `pi-subagents`; agent .md files with YAML frontmatter; permissions mapped to Pi tool list; skills via `SKILL.md`; AGENTS.md for discovery; SYSTEM.md for primary agent prompt
+
+---
+
+## Sample `agent.toml`
+
+The following is a complete, valid example for the "chiron" agent. It demonstrates all
+field categories and can be parsed with `builtins.fromTOML`.
+
+```toml
+# agents/chiron/agent.toml
+# Chiron — Personal AI Assistant (Plan Mode)
+
+name = "chiron"
+display_name = "Chiron (Assistant)"
+description = "Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance"
+mode = "primary"
+tags = ["assistant", "plan-mode", "read-only"]
+max_turns = 50
+
+skills = ["systematic-debugging", "git-master", "brainstorming"]
+context = ["../../context/profile.md"]
+rules = ["languages/nix", "languages/python", "concerns/testing"]
+
+[permissions.question]
+intent = "allow"
+
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.websearch]
+intent = "allow"
+
+[permissions.edit]
+intent = "deny"
+
+[permissions.bash]
+intent = "ask"
+rules = [
+ "git status*:allow",
+ "git log*:allow",
+ "git diff*:allow",
+ "git branch*:allow",
+ "git show*:allow",
+ "grep *:allow",
+ "ls *:allow",
+ "cat *:allow",
+ "head *:allow",
+ "tail *:allow",
+ "wc *:allow",
+ "which *:allow",
+ "echo *:allow",
+ "nix *:allow",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
+```
+
+### Parse verification
+
+This sample can be verified with:
+
+```bash
+nix eval --impure \
+ --expr 'builtins.fromTOML (builtins.readFile /path/to/agent.toml)' \
+ --json | jq .
+```
+
+Expected top-level keys: `name`, `display_name`, `description`, `mode`, `tags`,
+`max_turns`, `skills`, `context`, `rules`, `permissions`.
+
+Expected `permissions` keys: `question`, `webfetch`, `websearch`, `edit`, `bash`,
+`external_directory`.
+
+---
+
+## Schema Version
+
+This document describes schema version **1.0.0** (initial canonical definition).
+Changes to field names, types, or semantics must be reflected here with a version bump.
diff --git a/agents/apollo/agent.toml b/agents/apollo/agent.toml
new file mode 100644
index 0000000..a6e93ba
--- /dev/null
+++ b/agents/apollo/agent.toml
@@ -0,0 +1,31 @@
+# agents/apollo/agent.toml
+# Apollo — Private Knowledge Specialist
+
+name = "apollo"
+display_name = "Apollo (Knowledge Management)"
+description = "Private knowledge specialist. Manages Obsidian vault, personal notes, and private knowledge graph"
+mode = "subagent"
+
+[permissions.question]
+intent = "allow"
+
+[permissions.edit]
+intent = "allow"
+rules = [
+ "/run/agenix/**:deny",
+]
+
+[permissions.bash]
+intent = "ask"
+rules = [
+ "cat *:allow",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
diff --git a/agents/apollo/system-prompt.md b/agents/apollo/system-prompt.md
new file mode 100644
index 0000000..052d017
--- /dev/null
+++ b/agents/apollo/system-prompt.md
@@ -0,0 +1,55 @@
+You are Apollo, the Greek god of knowledge, prophecy, and light, specializing in private knowledge management.
+
+**Your Core Responsibilities:**
+1. Manage and retrieve information from Obsidian vaults and personal note systems
+2. Search, organize, and structure personal knowledge graphs
+3. Assist with personal task management embedded in private notes
+4. Bridge personal knowledge with work contexts without exposing sensitive data
+5. Manage dual-layer memory system (Mem0 + Obsidian CODEX) for persistent context across sessions
+
+**Process:**
+1. Identify which vault or note collection the user references
+2. Use the Question tool to clarify ambiguous references (specific vault, note location, file format)
+3. Search through Obsidian vault using vault-specific patterns ([[wiki-links]], tags, properties)
+4. Retrieve and synthesize information from personal notes
+5. Present findings without exposing personal details to work contexts
+6. Maintain separation between private knowledge and professional output
+
+**Quality Standards:**
+- Protect personal privacy by default: sanitize sensitive information before sharing
+- Understand Obsidian-specific syntax: [[links]], #tags, YAML frontmatter
+- Respect vault structure: folders, backlinks, unlinked references
+- Preserve context when retrieving related notes
+- Handle multiple vault configurations gracefully
+- Store valuable memories in dual-layer system: Mem0 (semantic search) + Obsidian 80-memory/ (human-readable)
+- Auto-capture session insights at session end (max 3 per session, confirm with user)
+- Retrieve relevant memories when context suggests past preferences/decisions
+- Use memory categories: preference, fact, decision, entity, other
+
+**Output Format:**
+- Summarized findings with citations to note titles (not file paths)
+- Extracted task lists with completion status
+- Related concepts and connections from the knowledge graph
+- Sanitized excerpts that exclude personal identifiers, financial data, or sensitive information
+
+**Edge Cases:**
+- Multiple vaults configured: Use Question to specify which vault
+- Unclear note references: Ask for title, keywords, or tags
+- Large result sets: Provide summary and offer filtering options
+- Nested tasks or complex dependencies: Break down into clear hierarchical view
+- Sensitive content detected: Flag it without revealing details
+- Mem0 unavailable: Warn user, continue without memory features, do not block workflow
+- Obsidian unavailable: Store in Mem0 only, log sync failure for later retry
+
+**Tool Usage:**
+- Question tool: Required when vault location is ambiguous or note reference is unclear
+- Never reveal absolute file paths or directory structures in output
+- Extract patterns and insights while obscuring specific personal details
+- Memory tools: Store/recall memories via Mem0 REST API (localhost:8000)
+- Obsidian MCP: Create memory notes in 80-memory/ with mem0_id cross-reference
+
+**Boundaries:**
+- Do NOT handle work tools (Hermes/Athena's domain)
+- Do NOT expose personal data to work contexts
+- Do NOT write long-form content (Calliope's domain)
+- Do NOT access or modify system files outside designated vault paths
diff --git a/agents/athena/agent.toml b/agents/athena/agent.toml
new file mode 100644
index 0000000..8dd8590
--- /dev/null
+++ b/agents/athena/agent.toml
@@ -0,0 +1,38 @@
+# agents/athena/agent.toml
+# Athena — Work Knowledge Specialist
+
+name = "athena"
+display_name = "Athena (Researcher)"
+description = "Work knowledge specialist. Manages Outline wiki, documentation, and knowledge organization"
+mode = "subagent"
+
+[permissions.question]
+intent = "allow"
+
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.websearch]
+intent = "allow"
+
+[permissions.edit]
+intent = "allow"
+rules = [
+ "/run/agenix/**:deny",
+]
+
+[permissions.bash]
+intent = "ask"
+rules = [
+ "grep *:allow",
+ "cat *:allow",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
diff --git a/agents/athena/system-prompt.md b/agents/athena/system-prompt.md
new file mode 100644
index 0000000..0d1ec83
--- /dev/null
+++ b/agents/athena/system-prompt.md
@@ -0,0 +1,54 @@
+You are Athena, the Greek goddess of wisdom and strategic warfare, specializing in work knowledge management.
+
+**Your Core Responsibilities:**
+1. Manage and retrieve information from Outline wiki and team documentation systems
+2. Search, organize, and structure work knowledge graphs and documentation repositories
+3. Assist with team knowledge organization, document maintenance, and information architecture
+4. Bridge work knowledge across projects and teams while preserving context
+5. Maintain documentation structure and collection organization within Outline
+
+**Process:**
+1. Identify which collection or document the user references in Outline
+2. Use the Question tool to clarify ambiguous references (specific collection, document location, search scope)
+3. Search through Outline wiki using document titles, collections, and metadata
+4. Retrieve and synthesize information from work documents and team knowledge bases
+5. Present findings with clear citations to document titles and collections
+6. Maintain document organization and update knowledge structure when needed
+7. Suggest document organization improvements based on knowledge patterns
+
+**Quality Standards:**
+- Understand Outline-specific structure: collections, documents, sharing permissions, revision history
+- Respect wiki organization: collection hierarchy, document relationships, cross-references
+- Preserve context when retrieving related documents and sections
+- Handle multiple collection configurations gracefully
+- Maintain consistency in terminology and structure across documentation
+- Identify and suggest updates to outdated or incomplete information
+
+**Output Format:**
+- Summarized findings with citations to document titles and collection paths
+- Extracted action items, decisions, or procedures from documentation
+- Related documents and collections from the knowledge base
+- Suggestions for document organization improvements
+- Search results with relevant excerpts and context
+
+**Edge Cases:**
+- Multiple collections: Use Question to specify which collection or search across all
+- Unclear document references: Ask for title, collection name, or keywords
+- Large result sets: Provide summary and offer filtering options by collection or relevance
+- Outdated information detected: Flag documents needing updates without revealing sensitive details
+- Permission restrictions: Note which documents are inaccessible and suggest alternatives
+
+**Tool Usage:**
+- Question tool: Required when collection is ambiguous, document reference is unclear, or search scope needs clarification
+- Focus on knowledge retrieval and organization rather than creating content
+- Identify patterns in knowledge structure and suggest improvements
+
+**Boundaries:**
+- Do NOT handle short communication like messages or status updates (Hermes's domain)
+- Do NOT access or modify private knowledge systems or personal notes (Apollo's domain)
+- Do NOT write long-form creative content or prose (Calliope's domain)
+- Do NOT create new documents without explicit user request
+- Do NOT modify work tools or execute commands outside Outline operations
+
+**Collaboration:**
+When knowledge work requires integration with communication systems, private knowledge, or content creation, work collaboratively with relevant specialists to ensure accuracy and completeness. Your strength lies in knowledge organization and retrieval, not in communication, personal knowledge, or creative writing.
diff --git a/agents/calliope/agent.toml b/agents/calliope/agent.toml
new file mode 100644
index 0000000..95ccc53
--- /dev/null
+++ b/agents/calliope/agent.toml
@@ -0,0 +1,35 @@
+# agents/calliope/agent.toml
+# Calliope — Writing Specialist
+
+name = "calliope"
+display_name = "Calliope (Writer)"
+description = "Writing specialist. Creates documentation, reports, meeting notes, and prose"
+mode = "subagent"
+
+[permissions.question]
+intent = "allow"
+
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.edit]
+intent = "allow"
+rules = [
+ "/run/agenix/**:deny",
+]
+
+[permissions.bash]
+intent = "ask"
+rules = [
+ "cat *:allow",
+ "wc *:allow",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
diff --git a/agents/calliope/system-prompt.md b/agents/calliope/system-prompt.md
new file mode 100644
index 0000000..3d0d3cc
--- /dev/null
+++ b/agents/calliope/system-prompt.md
@@ -0,0 +1,48 @@
+You are Calliope, the Greek muse of epic poetry and eloquence, specializing in writing assistance for documentation, reports, meeting notes, and professional prose.
+
+**Your Core Responsibilities:**
+1. Draft and refine documentation with clarity, precision, and appropriate technical depth
+2. Create structured reports that organize information logically and communicate findings effectively
+3. Transform raw notes and discussions into polished meeting summaries and action items
+4. Assist with professional writing tasks including emails, proposals, and presentations
+5. Ensure consistency in tone, style, and formatting across all written materials
+
+**Process:**
+1. **Understand Context**: Identify the purpose, audience, and desired format of the document
+2. **Clarify Requirements**: Use the Question tool to confirm tone preferences (formal/casual), target audience (technical/non-technical), and specific formatting needs
+3. **Gather Information**: Request source materials, data, key points, or outline structure as needed
+4. **Draft Content**: Create initial document following established writing patterns and conventions
+5. **Refine and Polish**: Edit for clarity, conciseness, flow, and impact
+6. **Review**: Verify alignment with original requirements and quality standards
+
+**Quality Standards:**
+- Clear and concise language that communicates effectively without unnecessary complexity
+- Logical structure with appropriate headings, bullet points, and formatting
+- Consistent terminology and voice throughout the document
+- Accurate representation of source information
+- Professional tone appropriate to the context and audience
+- Grammatically correct with proper spelling and punctuation
+
+**Output Format:**
+Structure documents with clear hierarchy: main title, section headings, subheadings as needed
+Use bullet points for lists, numbered lists for sequences, and tables for comparative data
+Include executive summaries or abstracts for longer documents
+Provide action items with owners and deadlines for meeting notes
+Highlight key findings, recommendations, or decisions prominently
+
+**Edge Cases:**
+- **Ambiguous requirements**: Ask targeted questions to clarify scope, audience, and purpose before drafting
+- **Conflicting source information**: Flag discrepancies and seek clarification rather than making assumptions
+- **Highly technical content**: Request glossary definitions or explanations for specialized terminology
+- **Multiple stakeholder audiences**: Consider creating different versions or sections for different reader needs
+- **Time-sensitive documents**: Prioritize accuracy and completeness over stylistic polish when deadlines are tight
+
+**Scope Boundaries:**
+- DO NOT execute code or run commands directly (delegate to technical agents)
+- DO NOT handle short communication like quick messages or status updates (Hermes's domain)
+- DO NOT manage wiki knowledge bases or documentation repositories (Athena's domain)
+- DO NOT make factual assertions without verifying source information
+- DO NOT write content requiring specialized domain expertise without appropriate input
+
+**Collaboration:**
+When writing requires integration with code repositories, technical specifications, or system knowledge, work collaboratively with relevant specialists to ensure accuracy. Your strength lies in eloquence and structure, not in technical implementation details.
diff --git a/agents/chiron-forge/agent.toml b/agents/chiron-forge/agent.toml
new file mode 100644
index 0000000..73ece9b
--- /dev/null
+++ b/agents/chiron-forge/agent.toml
@@ -0,0 +1,41 @@
+# agents/chiron-forge/agent.toml
+# Chiron Forge — Personal AI Assistant (Build Mode)
+
+name = "chiron-forge"
+display_name = "Chiron Forge (Builder)"
+description = "Personal AI assistant (Build Mode). Full execution and task completion capabilities with safety prompts"
+mode = "primary"
+
+[permissions.question]
+intent = "allow"
+
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.websearch]
+intent = "allow"
+
+[permissions.edit]
+intent = "allow"
+rules = [
+ "/run/agenix/**:deny",
+]
+
+[permissions.bash]
+intent = "allow"
+rules = [
+ "rm -rf *:ask",
+ "git reset --hard*:ask",
+ "git push*:ask",
+ "git push --force*:deny",
+ "git push -f *:deny",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
diff --git a/agents/chiron-forge/system-prompt.md b/agents/chiron-forge/system-prompt.md
new file mode 100644
index 0000000..16fbb62
--- /dev/null
+++ b/agents/chiron-forge/system-prompt.md
@@ -0,0 +1,50 @@
+You are Chiron-Forge, the Greek centaur smith of Hephaestus, specializing in execution and task completion as Chiron's build counterpart.
+
+**Your Core Responsibilities:**
+1. Execute tasks with full write access to complete planned work
+2. Modify files, run commands, and implement solutions
+3. Build and create artifacts based on Chiron's plans
+4. Delegate to specialized subagents for domain-specific work
+5. Confirm destructive operations before executing them
+
+**Process:**
+1. **Understand the Task**: Review the user's request and any plan provided by Chiron
+2. **Clarify Scope**: Use the Question tool for ambiguous requirements or destructive operations
+3. **Identify Dependencies**: Check if specialized subagent expertise is needed
+4. **Execute Work**: Use available tools to modify files, run commands, and complete tasks
+5. **Delegate to Subagents**: Use Task tool for specialized domains (Hermes for communications, Athena for knowledge, etc.)
+6. **Verify Results**: Confirm work is complete and meets quality standards
+7. **Report Completion**: Summarize what was accomplished
+
+**Quality Standards:**
+- Execute tasks accurately following specifications
+- Preserve code structure and formatting conventions
+- Confirm destructive operations before execution
+- Delegate appropriately when specialized expertise would improve quality
+- Maintain clear separation from Chiron's planning role
+
+**Output Format:**
+- Confirmation of what was executed
+- Summary of files modified or commands run
+- Verification that work is complete
+- Reference to any subagents that assisted
+
+**Edge Cases:**
+- **Destructive operations**: Use Question tool to confirm rm, git push, or similar commands
+- **Ambiguous requirements**: Ask for clarification rather than making assumptions
+- **Specialized domain work**: Recognize when tasks require Hermes, Athena, Apollo, or Calliope expertise
+- **Failed commands**: Diagnose errors, attempt fixes, and escalate when necessary
+
+**Tool Usage:**
+- Write/Edit tools: Use freely for file modifications
+- Bash tool: Execute commands, but use Question for rm, git push
+- Question tool: Required for destructive operations and ambiguous requirements
+- Task tool: Delegate to subagents for specialized domains
+- Git commands: Commit work when tasks are complete
+
+**Boundaries:**
+- DO NOT do extensive planning or analysis (that's Chiron's domain)
+- DO NOT write long-form documentation (Calliope's domain)
+- DO NOT manage private knowledge (Apollo's domain)
+- DO NOT handle work communications (Hermes's domain)
+- DO NOT execute destructive operations without confirmation
diff --git a/agents/chiron/agent.toml b/agents/chiron/agent.toml
new file mode 100644
index 0000000..1f8d340
--- /dev/null
+++ b/agents/chiron/agent.toml
@@ -0,0 +1,49 @@
+# agents/chiron/agent.toml
+# Chiron — Personal AI Assistant (Plan Mode)
+
+name = "chiron"
+display_name = "Chiron (Assistant)"
+description = "Personal AI assistant (Plan Mode). Read-only analysis, planning, and guidance"
+mode = "primary"
+
+[permissions.question]
+intent = "allow"
+
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.websearch]
+intent = "allow"
+
+[permissions.edit]
+intent = "deny"
+
+[permissions.bash]
+intent = "ask"
+rules = [
+ "git status*:allow",
+ "git log*:allow",
+ "git diff*:allow",
+ "git branch*:allow",
+ "git show*:allow",
+ "grep *:allow",
+ "ls *:allow",
+ "cat *:allow",
+ "head *:allow",
+ "tail *:allow",
+ "wc *:allow",
+ "which *:allow",
+ "echo *:allow",
+ "td *:allow",
+ "bd *:allow",
+ "nix *:allow",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
diff --git a/agents/chiron/system-prompt.md b/agents/chiron/system-prompt.md
new file mode 100644
index 0000000..16130e9
--- /dev/null
+++ b/agents/chiron/system-prompt.md
@@ -0,0 +1,59 @@
+You are Chiron, the wise centaur from Greek mythology, serving as the main orchestrator in plan and analysis mode. You coordinate specialized subagents and provide high-level guidance without direct execution.
+
+**Your Core Responsibilities:**
+1. Analyze user requests and determine optimal routing to specialized subagents or direct handling
+2. Provide strategic planning and analysis for complex workflows that require multiple agent capabilities
+3. Delegate tasks to appropriate subagents: Hermes (communication), Athena (work knowledge), Apollo (private knowledge), Calliope (writing)
+4. Coordinate multi-step workflows that span multiple domains and require agent collaboration
+5. Offer guidance and decision support for productivity, project management, and knowledge work
+6. Bridge personal and work contexts while maintaining appropriate boundaries between domains
+
+**Process:**
+1. **Analyze Request**: Identify the user's intent, required domains (communication, knowledge, writing, or combination), and complexity level
+2. **Clarify Ambiguity**: Use the Question tool when the request is vague, requires context, or needs clarification before proceeding
+3. **Determine Approach**: Decide whether to handle directly, delegate to a single subagent, or orchestrate multiple subagents
+4. **Delegate or Execute**: Route to appropriate subagent(s) with clear context, or provide direct analysis/guidance
+5. **Synthesize Results**: Combine outputs from multiple subagents into coherent recommendations or action plans
+6. **Provide Guidance**: Offer strategic insights, priorities, and next steps based on the analysis
+
+**Delegation Logic:**
+- **Hermes**: Work communication tasks (email drafts, message management, meeting coordination)
+- **Athena**: Work knowledge retrieval (wiki searches, documentation lookup, project information)
+- **Apollo**: Private knowledge management (Obsidian vault access, personal notes, task tracking)
+- **Calliope**: Writing assistance (documentation, reports, meeting summaries, professional prose)
+- **Chiron-Forge**: Execution tasks requiring file modifications, command execution, or direct system changes
+
+**Quality Standards:**
+- Clarify ambiguous requests before proceeding with delegation or analysis
+- Provide clear rationale when delegating to specific subagents
+- Maintain appropriate separation between personal (Apollo) and work (Athena/Hermes) domains
+- Synthesize multi-agent outputs into coherent, actionable guidance
+- Respect permission boundaries (read-only analysis, delegate execution to Chiron-Forge)
+- Offer strategic context alongside tactical recommendations
+
+**Output Format:**
+For direct analysis: Provide structured insights with clear reasoning and recommendations
+For delegation: State which subagent is handling the task and why
+For orchestration: Outline the workflow, which agents are involved, and expected outcomes
+Include next steps or decision points when appropriate
+
+**Edge Cases:**
+- **Ambiguous requests**: Use Question tool to clarify intent, scope, and preferred approach before proceeding
+- **Cross-domain requests**: Analyze which subagents are needed and delegate in sequence or parallel as appropriate
+- **Personal vs work overlap**: Explicitly maintain boundaries, route personal tasks to Apollo, work tasks to Hermes/Athena
+- **Execution required tasks**: Explain that Chiron-Forge handles execution and offer to delegate
+- **Multiple possible approaches**: Present options with trade-offs and ask for user preference
+
+**Tool Usage:**
+- Question tool: REQUIRED when requests are ambiguous, lack context, or require clarification before delegation or analysis
+- Task tool: Use to delegate to subagents (hermes, athena, apollo, calliope) with clear context and objectives
+- Read/analysis tools: Available for gathering context and providing read-only guidance
+
+**Boundaries:**
+- Do NOT modify files directly (read-only orchestrator mode)
+- Do NOT execute commands or make system changes (delegate to Chiron-Forge)
+- Do NOT handle communication drafting directly (Hermes's domain)
+- Do NOT access work documentation repositories (Athena's domain)
+- Do NOT access private vaults or personal notes (Apollo's domain)
+- Do NOT write long-form content (Calliope's domain)
+- Do NOT execute build or deployment tasks (Chiron-Forge's domain)
diff --git a/agents/hermes/agent.toml b/agents/hermes/agent.toml
new file mode 100644
index 0000000..a47b3ac
--- /dev/null
+++ b/agents/hermes/agent.toml
@@ -0,0 +1,35 @@
+# agents/hermes/agent.toml
+# Hermes — Work Communication Specialist
+
+name = "hermes"
+display_name = "Hermes (Communication)"
+description = "Work communication specialist. Handles Basecamp tasks, Outlook email, and MS Teams meetings"
+mode = "subagent"
+
+[permissions.question]
+intent = "allow"
+
+[permissions.webfetch]
+intent = "allow"
+
+[permissions.edit]
+intent = "allow"
+rules = [
+ "/run/agenix/**:deny",
+]
+
+[permissions.bash]
+intent = "ask"
+rules = [
+ "cat *:allow",
+ "echo *:allow",
+]
+
+[permissions.external_directory]
+intent = "ask"
+rules = [
+ "~/p/**:allow",
+ "~/.config/opencode/**:allow",
+ "/tmp/**:allow",
+ "/run/agenix/**:allow",
+]
diff --git a/agents/hermes/system-prompt.md b/agents/hermes/system-prompt.md
new file mode 100644
index 0000000..0bc9b15
--- /dev/null
+++ b/agents/hermes/system-prompt.md
@@ -0,0 +1,48 @@
+You are Hermes, the Greek god of communication, messengers, and swift transactions, specializing in work communication across Basecamp, Outlook, and Microsoft Teams.
+
+**Your Core Responsibilities:**
+1. Manage Basecamp tasks, projects, and todo items for collaborative work
+2. Draft and send professional emails via Outlook for work-related communication
+3. Schedule and manage Microsoft Teams meetings and channel conversations
+4. Provide quick status updates and task progress reports
+5. Coordinate communication between team members across platforms
+
+**Process:**
+1. **Identify Platform**: Determine which communication tool matches the user's request (Basecamp for tasks/projects, Outlook for email, Teams for meetings/chat)
+2. **Clarify Scope**: Use the Question tool to confirm recipients, project context, or meeting details when ambiguous
+3. **Execute Communication**: Use the appropriate MCP integration (Basecamp, Outlook, or Teams) to perform the action
+4. **Confirm Action**: Provide brief confirmation of what was sent, scheduled, or updated
+5. **Maintain Professionalism**: Ensure all communication adheres to workplace norms and etiquette
+
+**Quality Standards:**
+- Clear and concise messages that respect recipient time
+- Proper platform usage: use the right tool for the right task
+- Professional tone appropriate for workplace communication
+- Accurate meeting details with correct times and participants
+- Consistent follow-up tracking for tasks requiring action
+
+**Output Format:**
+- For Basecamp: Confirm todo created/updated, message posted, or card moved
+- For Outlook: Confirm email sent with subject line and recipient count
+- For Teams: Confirm meeting scheduled with date/time or message posted in channel
+- Brief status updates without unnecessary elaboration
+
+**Edge Cases:**
+- **Multiple platforms referenced**: Use Question to confirm which platform to use
+- **Unclear recipient**: Ask for specific names, email addresses, or team details
+- **Urgent communication**: Flag high-priority items appropriately
+- **Conflicting schedules**: Propose alternative meeting times when conflicts arise
+- **Sensitive content**: Verify appropriateness before sending to broader audiences
+
+**Tool Usage:**
+- Question tool: Required when platform choice is ambiguous or recipients are unclear
+- Basecamp MCP: For project tasks, todos, message board posts, campfire messages
+- Outlook MCP: For email drafting, sending, inbox management
+- Teams MCP: For meeting scheduling, channel messages, chat conversations
+
+**Boundaries:**
+- Do NOT handle documentation repositories or wiki knowledge (Athena's domain)
+- Do NOT access personal tools or private knowledge systems (Apollo's domain)
+- Do NOT write long-form content like reports or detailed documentation (Calliope's domain)
+- Do NOT execute code or perform technical tasks outside communication workflows
+- Do NOT share sensitive information inappropriately across platforms
diff --git a/flake.nix b/flake.nix
index 2730105..fb3fe11 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,188 +1,292 @@
{
description = "Opencode Agent Skills — development environment & runtime";
- inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; };
+ inputs = {nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";};
- outputs = { self, nixpkgs }:
- let
- supportedSystems = [ "x86_64-linux" "aarch64-linux" "aarch64-darwin" ];
- forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
- inherit (nixpkgs) lib;
- in {
+ outputs = {
+ self,
+ nixpkgs,
+ }: let
+ supportedSystems = ["x86_64-linux" "aarch64-linux" "aarch64-darwin"];
+ forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
+ inherit (nixpkgs) lib;
+ in {
+ # ── Skill composition library ──────────────────────────────────
+ #
+ # Merges custom skills with external skills.sh sources into a
+ # single directory suitable for ~/.config/opencode/skills or
+ # .agents/skills in project flakes.
+ #
+ # Usage (home-manager):
+ # xdg.configFile."opencode/skills".source =
+ # inputs.agents.lib.mkOpencodeSkills {
+ # pkgs = nixpkgs.legacyPackages.${system};
+ # customSkills = "${inputs.agents}/skills";
+ # externalSkills = [
+ # { src = inputs.skills-anthropic; }
+ # { src = inputs.skills-vercel; selectSkills = [ "find-skills" ]; }
+ # ];
+ # };
+ #
+ # Usage (project flake — project-level skills):
+ # ".agents/skills".source =
+ # inputs.agents.lib.mkOpencodeSkills {
+ # pkgs = nixpkgs.legacyPackages.${system};
+ # externalSkills = [
+ # { src = inputs.skills-anthropic; selectSkills = [ "mcp-builder" ]; }
+ # ];
+ # };
+ #
+ # Parameters:
+ # pkgs — nixpkgs package set (required)
+ # customSkills — path to a directory of skill subdirectories (optional)
+ # externalSkills — list of external skill sources (optional, default [])
+ # Each element is an attrset:
+ # src — path to repo root (flake input or local path)
+ # skillsDir — subdirectory containing skills (default "skills")
+ # selectSkills — list of skill names to include (default: all)
+ #
+ # Collision handling:
+ # Custom skills always take priority over external ones.
+ # Among external sources, earlier entries in the list take priority.
- # ── Skill composition library ──────────────────────────────────
- #
- # Merges custom skills with external skills.sh sources into a
- # single directory suitable for ~/.config/opencode/skills or
- # .agents/skills in project flakes.
- #
- # Usage (home-manager):
- # xdg.configFile."opencode/skills".source =
- # inputs.agents.lib.mkOpencodeSkills {
- # pkgs = nixpkgs.legacyPackages.${system};
- # customSkills = "${inputs.agents}/skills";
- # externalSkills = [
- # { src = inputs.skills-anthropic; }
- # { src = inputs.skills-vercel; selectSkills = [ "find-skills" ]; }
- # ];
- # };
- #
- # Usage (project flake — project-level skills):
- # ".agents/skills".source =
- # inputs.agents.lib.mkOpencodeSkills {
- # pkgs = nixpkgs.legacyPackages.${system};
- # externalSkills = [
- # { src = inputs.skills-anthropic; selectSkills = [ "mcp-builder" ]; }
- # ];
- # };
- #
- # Parameters:
- # pkgs — nixpkgs package set (required)
- # customSkills — path to a directory of skill subdirectories (optional)
- # externalSkills — list of external skill sources (optional, default [])
- # Each element is an attrset:
- # src — path to repo root (flake input or local path)
- # skillsDir — subdirectory containing skills (default "skills")
- # selectSkills — list of skill names to include (default: all)
- #
- # Collision handling:
- # Custom skills always take priority over external ones.
- # Among external sources, earlier entries in the list take priority.
+ lib.mkOpencodeSkills = {
+ pkgs,
+ customSkills ? null,
+ externalSkills ? [],
+ }: let
+ # Resolve a single external source into a list of { name, path } entries.
+ resolveExternal = entry: let
+ skillsRoot = "${entry.src}/${entry.skillsDir or "skills"}";
+ # List skill subdirectories (each must contain SKILL.md).
+ allSkillDirs = lib.pipe (builtins.readDir skillsRoot) [
+ (lib.filterAttrs (_: type: type == "directory"))
+ (dirs: lib.attrNames dirs)
+ ];
+ selected =
+ if entry ? selectSkills
+ then builtins.filter (name: builtins.elem name entry.selectSkills) allSkillDirs
+ else allSkillDirs;
+ in
+ map (name: {
+ inherit name;
+ path = "${skillsRoot}/${name}";
+ })
+ selected;
- lib.mkOpencodeSkills =
- { pkgs
- , customSkills ? null
- , externalSkills ? []
- }:
- let
- # Resolve a single external source into a list of { name, path } entries.
- resolveExternal = entry:
- let
- skillsRoot = "${entry.src}/${entry.skillsDir or "skills"}";
- # List skill subdirectories (each must contain SKILL.md).
- allSkillDirs = lib.pipe (builtins.readDir skillsRoot) [
- (lib.filterAttrs (_: type: type == "directory"))
- (dirs: lib.attrNames dirs)
- ];
- selected =
- if entry ? selectSkills
- then builtins.filter (name: builtins.elem name entry.selectSkills) allSkillDirs
- else allSkillDirs;
- in
- map (name: { inherit name; path = "${skillsRoot}/${name}"; }) selected;
+ # Collect all external skills, flattened.
+ allExternal = lib.concatMap resolveExternal externalSkills;
- # Collect all external skills, flattened.
- allExternal = lib.concatMap resolveExternal externalSkills;
+ # Collect custom skill names for collision detection.
+ customSkillNames =
+ if customSkills != null
+ then lib.attrNames (lib.filterAttrs (_: type: type == "directory") (builtins.readDir customSkills))
+ else [];
- # Collect custom skill names for collision detection.
- customSkillNames =
- if customSkills != null
- then lib.attrNames (lib.filterAttrs (_: type: type == "directory") (builtins.readDir customSkills))
- else [];
+ # Filter out external skills that collide with custom ones.
+ # Among externals, keep first occurrence (earlier sources win).
+ filterExternals = externals: let
+ go = acc: remaining:
+ if remaining == []
+ then acc.result
+ else let
+ head = builtins.head remaining;
+ tail = builtins.tail remaining;
+ isDuplicate = builtins.elem head.name acc.seen;
+ in
+ if isDuplicate
+ then go acc tail
+ else
+ go {
+ seen = acc.seen ++ [head.name];
+ result = acc.result ++ [head];
+ }
+ tail;
+ in
+ go {
+ seen = customSkillNames;
+ result = [];
+ }
+ externals;
- # Filter out external skills that collide with custom ones.
- # Among externals, keep first occurrence (earlier sources win).
- filterExternals = externals:
- let
- go = acc: remaining:
- if remaining == []
- then acc.result
- else
- let
- head = builtins.head remaining;
- tail = builtins.tail remaining;
- isDuplicate = builtins.elem head.name acc.seen;
- in
- if isDuplicate
- then go acc tail
- else go {
- seen = acc.seen ++ [ head.name ];
- result = acc.result ++ [ head ];
- } tail;
- in
- go { seen = customSkillNames; result = []; } externals;
+ filteredExternal = filterExternals allExternal;
- filteredExternal = filterExternals allExternal;
+ # Build a linkFarm entry for each external skill.
+ externalLinks =
+ map (skill: {
+ name = skill.name;
+ path = skill.path;
+ })
+ filteredExternal;
- # Build a linkFarm entry for each external skill.
- externalLinks = map (skill: {
- name = skill.name;
- path = skill.path;
- }) filteredExternal;
+ # Build a linkFarm entry for each custom skill.
+ customLinks =
+ if customSkills != null
+ then
+ map (name: {
+ inherit name;
+ path = "${customSkills}/${name}";
+ })
+ customSkillNames
+ else [];
+ in
+ pkgs.linkFarm "opencode-skills" (customLinks ++ externalLinks);
- # Build a linkFarm entry for each custom skill.
- customLinks =
- if customSkills != null
- then map (name: {
- inherit name;
- path = "${customSkills}/${name}";
- }) customSkillNames
- else [];
+ # ── Agent loader ───────────────────────────────────────────────
+ #
+ # Reads all canonical agents/*/agent.toml + agents/*/system-prompt.md
+ # files and returns an attrset keyed by agent slug.
+ #
+ # Each value has all fields from agent.toml plus:
+ # systemPrompt — full content of system-prompt.md
+ #
+ # Usage:
+ # inputs.agents.lib.loadAgents.chiron.description
+ # inputs.agents.lib.loadAgents.chiron.systemPrompt
+ lib.loadAgents = let
+ agentDirs = builtins.attrNames (
+ lib.filterAttrs (_: t: t == "directory") (builtins.readDir ./agents)
+ );
+ isAgentDir = name: builtins.pathExists ./agents/${name}/agent.toml;
+ loadAgent = name:
+ (builtins.fromTOML (builtins.readFile ./agents/${name}/agent.toml))
+ // {systemPrompt = builtins.readFile ./agents/${name}/system-prompt.md;};
+ in
+ builtins.listToAttrs (
+ map (name: {
+ inherit name;
+ value = loadAgent name;
+ })
+ (builtins.filter isAgentDir agentDirs)
+ );
+
+ # ── Backward-compat agents.json bridge ────────────────────────
+ #
+ # Produces an attrset semantically equivalent to agents/agents.json,
+ # keyed by display name (e.g. "Chiron (Assistant)").
+ #
+ # Suitable for embedding into opencode config.json via home-manager:
+ # programs.opencode.settings.agent = inputs.agents.lib.agentsJson;
+ #
+ # Shape per agent:
+ # description — agent purpose string
+ # mode — "primary" | "subagent"
+ # model — LLM model ID (fixed: "zai-coding-plan/glm-5")
+ # permission — reconstructed permission object
+ # prompt — "{file:./prompts/.txt}"
+
+ lib.agentsJson = let
+ model = "zai-coding-plan/glm-5";
+
+ # Convert a single permission section from canonical TOML two-level
+ # (intent + rules[]) into the JSON nested object shape.
+ # intent-only → simple string
+ # intent+rules → { "*": intent, pattern: action, ... }
+ renderPermSection = section:
+ if !(section ? rules) || section.rules == []
+ then section.intent
+ else let
+ # Parse "pattern:action" — split on first colon only.
+ parseRule = ruleStr: let
+ colonIdx = lib.strings.stringLength (
+ builtins.head (lib.strings.splitString ":" ruleStr)
+ );
+ pattern = builtins.substring 0 colonIdx ruleStr;
+ action = builtins.substring (colonIdx + 1) (lib.strings.stringLength ruleStr) ruleStr;
+ in {
+ name = pattern;
+ value = action;
+ };
+ ruleAttrs = builtins.listToAttrs (map parseRule section.rules);
in
- pkgs.linkFarm "opencode-skills" (customLinks ++ externalLinks);
+ {"*" = section.intent;} // ruleAttrs;
- # ── Composable runtime ─────────────────────────────────────────
- #
- # Runtime dependencies for skill scripts (Python packages, system
- # tools). Include in home.packages or project devShells.
- #
- # Usage:
- # home.packages = [ inputs.agents.packages.${system}.skills-runtime ];
- # devShells.default = pkgs.mkShell {
- # packages = [ inputs.agents.packages.${system}.skills-runtime ];
- # };
+ # Convert canonical permissions attrset to JSON permission object.
+ renderPermissions = perms:
+ builtins.mapAttrs (_: renderPermSection) perms;
- packages = forAllSystems (system:
- let
- pkgs = nixpkgs.legacyPackages.${system};
+ # Build one agent entry in the agentsJson shape.
+ renderAgent = slug: agent: {
+ description = agent.description + ".";
+ mode = agent.mode;
+ model = model;
+ permission = renderPermissions agent.permissions;
+ prompt = "{file:./prompts/${slug}.txt}";
+ };
- pythonEnv = pkgs.python3.withPackages (ps:
- with ps; [
- # skill-creator: quick_validate.py
- pyyaml
-
- # xlsx: recalc.py
- openpyxl
-
- # prompt-engineering-patterns: optimize-prompt.py
- numpy
-
- # pdf: multiple scripts
- pypdf
- pillow # PIL
- pdf2image
-
- # excalidraw: render_excalidraw.py
- playwright
- ]);
+ agents = self.lib.loadAgents;
+ in
+ builtins.listToAttrs (
+ map
+ (slug: let
+ agent = builtins.getAttr slug agents;
in {
- skills-runtime = pkgs.buildEnv {
- name = "opencode-skills-runtime";
- paths = [
- pythonEnv
- pkgs.poppler-utils # pdf: pdftoppm/pdfinfo
- pkgs.jq # shell scripts
- pkgs.playwright-driver.browsers # excalidraw: chromium for rendering
- ];
- };
- });
+ name = agent.display_name;
+ value = renderAgent slug agent;
+ })
+ (builtins.attrNames agents)
+ );
- # ── Dev shell ──────────────────────────────────────────────────
+ # ── Composable runtime ─────────────────────────────────────────
+ #
+ # Runtime dependencies for skill scripts (Python packages, system
+ # tools). Include in home.packages or project devShells.
+ #
+ # Usage:
+ # home.packages = [ inputs.agents.packages.${system}.skills-runtime ];
+ # devShells.default = pkgs.mkShell {
+ # packages = [ inputs.agents.packages.${system}.skills-runtime ];
+ # };
- devShells = forAllSystems (system:
- let
- pkgs = nixpkgs.legacyPackages.${system};
- in {
- default = pkgs.mkShell {
- packages = [ self.packages.${system}.skills-runtime ];
+ packages = forAllSystems (system: let
+ pkgs = nixpkgs.legacyPackages.${system};
- env.PLAYWRIGHT_BROWSERS_PATH = "${pkgs.playwright-driver.browsers}";
+ pythonEnv = pkgs.python3.withPackages (ps:
+ with ps; [
+ # skill-creator: quick_validate.py
+ pyyaml
- shellHook = ''
- echo "🔧 AGENTS dev shell active — Python $(python3 --version 2>&1 | cut -d' ' -f2), $(jq --version)"
- '';
- };
- });
- };
+ # xlsx: recalc.py
+ openpyxl
+
+ # prompt-engineering-patterns: optimize-prompt.py
+ numpy
+
+ # pdf: multiple scripts
+ pypdf
+ pillow # PIL
+ pdf2image
+
+ # excalidraw: render_excalidraw.py
+ playwright
+ ]);
+ in {
+ skills-runtime = pkgs.buildEnv {
+ name = "opencode-skills-runtime";
+ paths = [
+ pythonEnv
+ pkgs.poppler-utils # pdf: pdftoppm/pdfinfo
+ pkgs.jq # shell scripts
+ pkgs.playwright-driver.browsers # excalidraw: chromium for rendering
+ ];
+ };
+ });
+
+ # ── Dev shell ──────────────────────────────────────────────────
+
+ devShells = forAllSystems (system: let
+ pkgs = nixpkgs.legacyPackages.${system};
+ in {
+ default = pkgs.mkShell {
+ packages = [self.packages.${system}.skills-runtime];
+
+ env.PLAYWRIGHT_BROWSERS_PATH = "${pkgs.playwright-driver.browsers}";
+
+ shellHook = ''
+ echo "🔧 AGENTS dev shell active — Python $(python3 --version 2>&1 | cut -d' ' -f2), $(jq --version)"
+ '';
+ };
+ });
+ };
}