Builtin Block Types

A few Block Types are packaged with std.

In practical terms, Block Types distinguish themselves through the actions they provide to a particular Cell Block.

It is entirely possible to define custom Block Types with custom Actions according to the needs of your project.

Arion

{root}:
/*
Use the Arion Blocktype for arionCompose Jobs - https://docs.hercules-ci.com/arion/

Every action wraps the arion CLI against the prebuilt
docker-compose file of the target.

Available actions:
  - up
  - ps
  - stop
  - rm
  - config
  - arion
*/
let
  inherit (root) mkCommand;
in
  name: {
    inherit name;
    type = "arion";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      pkgs = inputs.nixpkgs.${currentSystem};
      cmd = "arion --prebuilt-file ${target.config.out.dockerComposeYaml}";
      # all actions share the same shape: run a script with arion on the PATH
      mkArionCommand = action: description: script:
        mkCommand currentSystem action description [pkgs.arion] script {};
    in [
      (mkArionCommand "up" "arion up" ''${cmd} up "$@" '')
      (mkArionCommand "ps" "exec this arion task to ps" ''${cmd} ps "$@" '')
      (mkArionCommand "stop" "arion stop" ''${cmd} stop "$@" '')
      (mkArionCommand "rm" "arion rm" ''${cmd} rm "$@" '')
      (mkArionCommand "config" "check the docker-compose yaml file" ''${cmd} config "$@" '')
      (mkArionCommand "arion" "pass any command to arion" ''${cmd} "$@" '')
    ];
  }

Runnables (todo: vs installables)

{
  root,
  super,
}:
/*
Use the Runnables Blocktype for targets that you want to
make accessible with a 'run' action on the TUI.

Available actions:
  - build
  - run
*/
let
  # fix: `mkCommand` was also inherited here but never used by this blocktype
  inherit (root) actions;
  inherit (super) addSelectorFunctor;
in
  name: {
    # makes the block callable as a target selector
    __functor = addSelectorFunctor;
    inherit name;
    type = "runnables";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: [
      (actions.build currentSystem target)
      (actions.run currentSystem target)
    ];
  }

Installables (todo: vs runnables)

{
  root,
  super,
  nixpkgs,
}:
/*
Use the Installables Blocktype for targets that you want to
make available for installation into the user's nix profile.

Available actions:
  - install
  - upgrade
  - remove
  - build
  - bundle
  - bundleImage
  - bundleAppImage
*/
let
  inherit (root) mkCommand actions;
  inherit (super) addSelectorFunctor;
  # small convenience prelude: nixpkgs lib merged with builtins
  l = nixpkgs.lib // builtins;
in
  name: {
    # makes the block callable as a target selector
    __functor = addSelectorFunctor;
    inherit name;
    type = "installables";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      # quote the fragment so it is safe to splice into the shell scripts below
      escapedFragment = l.escapeShellArg fragment;
    in [
      (actions.build currentSystem target)
      # profile commands require a flake ref
      # NOTE(review): the '# ${target}' line inside each script interpolates the
      # target's store path into the script text — presumably so the script
      # carries a dependency on the built target; confirm before removing.
      (mkCommand currentSystem "install" "install this target" [] ''
        # ${target}
        set -x
        nix profile install "$PRJ_ROOT#"${escapedFragment}
      '' {})
      (mkCommand currentSystem "upgrade" "upgrade this target" [] ''
        # ${target}
        set -x
        nix profile upgrade "$PRJ_ROOT#"${escapedFragment}
      '' {})
      (mkCommand currentSystem "remove" "remove this target" [] ''
        # ${target}
        set -x
        nix profile remove "$PRJ_ROOT#"${escapedFragment}
      '' {})
      # TODO: use target. `nix bundle` requires a flake ref, but we may be able to use nix-bundle instead as a workaround
      (mkCommand currentSystem "bundle" "bundle this target" [] ''
        # ${target}
        set -x
        nix bundle --bundler github:Ninlives/relocatable.nix --refresh "$PRJ_ROOT#"${escapedFragment}
      '' {})
      (mkCommand currentSystem "bundleImage" "bundle this target to image" [] ''
        # ${target}
        set -x
        nix bundle --bundler github:NixOS/bundlers#toDockerImage --refresh "$PRJ_ROOT#"${escapedFragment}
      '' {})
      (mkCommand currentSystem "bundleAppImage" "bundle this target to AppImage" [] ''
        # ${target}
        set -x
        nix bundle --bundler github:ralismark/nix-appimage --refresh "$PRJ_ROOT#"${escapedFragment}
      '' {})
    ];
  }

Pkgs

_:
/*
Use the Pkgs Blocktype to construct a customized variant of
nixpkgs, e.g. with overlays applied.

Because evaluating a whole package set is expensive, targets of
this type are excluded from the CLI / TUI so they do not slow
those interfaces down.
*/
name: {
  type = "pkgs";
  inherit name;
  # the special power of this blocktype: hide targets from CLI/TUI discovery
  cli = false;
}

Devshells

{  
  root,
  super,
}:
/*
Use the Devshells Blocktype for devShells.

Available actions:
  - build
  - enter
*/
let
  inherit (root) mkCommand actions devshellDrv;
  inherit (super) addSelectorFunctor;
  inherit (builtins) unsafeDiscardStringContext;
in
  name: {
    # makes the block callable as a target selector
    __functor = addSelectorFunctor;
    inherit name;
    type = "devshells";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      # normalize the target into a buildable devshell derivation
      developDrv = devshellDrv target;
    in [
      (actions.build currentSystem target)
      # Builds the shell into a per-fragment profile under $PRJ_DATA_HOME
      # (so it is GC-rooted), then sources its environment via
      # `nix print-dev-env` and hands over to an interactive $SHELL —
      # unless STD_DIRENV is set, in which case direnv already owns the shell.
      # NOTE(review): `unsafeDiscardStringContext` on drvPath appears intended
      # to keep the script itself from depending on the .drv — confirm.
      (mkCommand currentSystem "enter" "enter this devshell" [] ''
        profile_path="$PRJ_DATA_HOME/${fragmentRelPath}"
        mkdir -p "$profile_path"
        # ${developDrv}
        nix_args=(
          "${unsafeDiscardStringContext developDrv.drvPath}"
          "--no-update-lock-file"
          "--no-write-lock-file"
          "--no-warn-dirty"
          "--accept-flake-config"
          "--no-link"
          "--build-poll-interval" "0"
          "--builders-use-substitutes"
        )
        nix build "''${nix_args[@]}" --profile "$profile_path/shell-profile"
        _SHELL="$SHELL"
        eval "$(nix print-dev-env ${developDrv})"
        SHELL="$_SHELL"
        if ! [[ -v STD_DIRENV ]]; then
          if declare -F __devshell-motd &>/dev/null; then
            __devshell-motd
          fi
          exec $SHELL -i
        fi
      '' {})
    ];
  }

Nixago

{root}:
/*
Use the Nixago Blocktype for nixago pebbles.

Use Nixago pebbles to ensure files are present
or symlinked into your repository. You may typically
use this for repo dotfiles.

For more information, see: https://github.com/nix-community/nixago.

Available actions:
  - populate
  - explore
*/
let
  inherit (root) mkCommand;
in
  name: {
    inherit name;
    type = "nixago";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      # package set for the current system (provides `bat` below)
      pkgs = inputs.nixpkgs.${currentSystem};
    in [
      # runs the pebble's generated shell hook, which writes / links the
      # managed file(s) into the repository
      (mkCommand currentSystem "populate" "populate this nixago file into the repo" [] ''
        ${target.install}/bin/nixago_shell_hook
      '' {})
      (mkCommand currentSystem "explore" "interactively explore the nixago file" [pkgs.bat] ''
        bat "${target.configFile}"
      '' {})
    ];
  }

Containers

{
  # supplied by the framework's blocktype call convention; currently unused here
  trivial,
  root,
  super,
}:
/*
Use the Containers Blocktype for OCI-images built with nix2container.

Available actions:
  - print-image
  - publish
  - load
*/
let
  inherit (root) mkCommand actions;
  inherit (super) addSelectorFunctor;
  # fix: removed `inherit (builtins) readFile toFile;` — neither name was used
  # (the code below calls `builtins.toFile` explicitly)
in
  name: {
    __functor = addSelectorFunctor;
    inherit name;
    type = "containers";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      inherit (inputs.n2c.packages.${currentSystem}) skopeo-nix2container;
      # fix: removed unused `triv = trivial.${currentSystem};`
      # proviso script gates the `publish` action (e.g. in CI pipelines)
      proviso = ./containers-proviso.sh;

      # newline-separated tag list rendered into the store, so the shell
      # snippets below can iterate over it with `$(<...)`
      tags' =
        builtins.toFile "${target.name}-tags.json" (builtins.concatStringsSep "\n" target.image.tags);
      copyFn = ''
        copy() {
          local uri prev_tag
          uri=$1
          shift

          for tag in $(<${tags'}); do
            if ! [[ -v prev_tag ]]; then
              skopeo --insecure-policy copy nix:${target} "$uri:$tag" "$@"
            else
              # speedup: copy from the previous tag to avoid superflous network bandwidth
              skopeo --insecure-policy copy "$uri:$prev_tag" "$uri:$tag" "$@"
            fi
            echo "Done: $uri:$tag"

            prev_tag="$tag"
          done
        }
      '';
    in [
      (actions.build currentSystem target)
      (mkCommand currentSystem "print-image" "print out the image.repo with all tags" [] ''
        echo
        for tag in $(<${tags'}); do
          echo "${target.image.repo}:$tag"
        done
      '' {})
      (mkCommand currentSystem "publish" "copy the image to its remote registry" [skopeo-nix2container] ''
          ${copyFn}
          copy docker://${target.image.repo}
        '' {
          meta.image = target.image.name;
          inherit proviso;
        })
      (mkCommand currentSystem "load" "load image to the local docker daemon" [skopeo-nix2container] ''
        ${copyFn}
        if command -v podman &> /dev/null; then
           echo "Podman detected: copy to local podman"
           copy containers-storage:${target.image.repo} "$@"
        fi
        if command -v docker &> /dev/null; then
           echo "Docker detected: copy to local docker"
           copy docker-daemon:${target.image.repo} "$@"
        fi
      '' {})
    ];
  }

Terra

Block type for managing Terranix configuration for Terraform.

{
  root,
  super,
}:
/*
Use the Terra Blocktype for terraform configurations managed by terranix.

Important! You need to specify the state repo on the blocktype, e.g.:

[
  (terra "infra" "[email protected]:myorg/myrepo.git")
]

Available actions:
  - init
  - plan
  - apply
  - state
  - refresh
  - destroy
  - terraform
*/
let
  inherit (root) mkCommand;
  inherit (super) addSelectorFunctor postDiffToGitHubSnippet;
in
  name: repo: {
    inherit name;
    __functor = addSelectorFunctor;
    type = "terra";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      inherit (inputs) terranix;
      pkgs = inputs.nixpkgs.${currentSystem};

      # where terraform-backend-git keeps this fragment's state
      git = {
        inherit repo;
        ref = "main";
        state = fragmentRelPath + "/state.json";
      };

      # evaluate the terranix module into a rendered config.tf.json in the store
      terraEval = import (terranix + /core/default.nix);
      terraformConfiguration = builtins.toFile "config.tf.json" (builtins.toJSON
        (terraEval {
          inherit pkgs; # only effectively required for `pkgs.lib`
          terranix_config = {
            _file = fragmentRelPath;
            imports = [target];
          };
          strip_nulls = true;
        })
        .config);

      # prepares the per-fragment staging area under $PRJ_ROOT/.tf and
      # copies the rendered config.tf.json into it
      setup = ''
        export TF_VAR_fragment=${pkgs.lib.strings.escapeShellArg fragment}
        export TF_VAR_fragmentRelPath=${fragmentRelPath}
        export TF_IN_AUTOMATION=1
        export TF_DATA_DIR="$PRJ_DATA_HOME/${fragmentRelPath}"
        export TF_PLUGIN_CACHE_DIR="$PRJ_CACHE_HOME/tf-plugin-cache"
        mkdir -p "$TF_DATA_DIR"
        mkdir -p "$TF_PLUGIN_CACHE_DIR"
        dir="$PRJ_ROOT/.tf/${fragmentRelPath}/.tf"
        mkdir -p "$dir"
        cat << MESSAGE > "$dir/readme.md"
        This is a tf staging area.
        It is motivated by the terraform CLI requiring to be executed in a staging area.
        MESSAGE

        if [[ -e "$dir/config.tf.json" ]]; then rm -f "$dir/config.tf.json"; fi
        jq '.' ${terraformConfiguration} > "$dir/config.tf.json"
      '';
      # wraps a terraform subcommand with the git state backend;
      # for `plan`, additionally captures console output and posts
      # a diff summary to GitHub via postDiffToGitHubSnippet
      wrap = cmd: ''
        ${setup}

        # Run the command and capture output
        terraform-backend-git git \
           --dir "$dir" \
           --repository ${git.repo} \
           --ref ${git.ref} \
           --state ${git.state} \
           terraform ${cmd} "$@" \
           ${pkgs.lib.optionalString (cmd == "plan") ''
             -lock=false -no-color | tee "$PRJ_CACHE_HOME/tf.console.txt"
           ''}

        # Pass output to the snippet
        ${pkgs.lib.optionalString (cmd == "plan") ''
          output=$(cat "$PRJ_CACHE_HOME/tf.console.txt")
          summary_plan=$(tac "$PRJ_CACHE_HOME/tf.console.txt" | grep -m 1 -E '^(Error:|Plan:|Apply complete!|No changes.|Success)' | tac || echo "View output.")
          summary="<code>std ${fragmentRelPath}:${cmd}</code>: $summary_plan" 
          ${postDiffToGitHubSnippet "${fragmentRelPath}:${cmd}" "$output" "$summary"}
        ''}
      '';
      
    in [
      (mkCommand currentSystem "init" "tf init" [pkgs.jq pkgs.terraform pkgs.terraform-backend-git] (wrap "init") {})
      (mkCommand currentSystem "plan" "tf plan" [pkgs.jq pkgs.terraform pkgs.terraform-backend-git] (wrap "plan") {})
      (mkCommand currentSystem "apply" "tf apply" [pkgs.jq pkgs.terraform pkgs.terraform-backend-git] (wrap "apply") {})
      (mkCommand currentSystem "state" "tf state" [pkgs.jq pkgs.terraform pkgs.terraform-backend-git] (wrap "state") {})
      (mkCommand currentSystem "refresh" "tf refresh" [pkgs.jq pkgs.terraform pkgs.terraform-backend-git] (wrap "refresh") {})
      (mkCommand currentSystem "destroy" "tf destroy" [pkgs.jq pkgs.terraform pkgs.terraform-backend-git] (wrap "destroy") {})
      (mkCommand currentSystem "terraform" "pass any command to terraform" [pkgs.jq pkgs.terraform pkgs.terraform-backend-git] (wrap "") {})
    ];
  }

Data

{
  trivial,
  root,
}:
/*
Use the Data Blocktype for json serializable data.

Available actions:
  - write
  - explore

For all actions is true:
  Nix-proper 'stringContext'-carried dependency will be realized
  to the store, if present.
*/
let
  inherit (root) mkCommand;
  inherit (builtins) toJSON concatStringsSep;
in
  name: {
    inherit name;
    type = "data";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      # consistency fix: bind the package set directly like every other
      # blocktype; the previous `inherit (inputs.nixpkgs.${currentSystem}) pkgs;`
      # went through nixpkgs' `pkgs` self-reference and evaluated to the same set
      pkgs = inputs.nixpkgs.${currentSystem};
      triv = trivial.${currentSystem};

      # if target ? __std_data_wrapper, then we need to unpack from `.data`
      json = triv.writeTextFile {
        name = "data.json";
        text = toJSON (
          if target ? __std_data_wrapper
          then target.data
          else target
        );
      };
    in [
      # prints the store path of the rendered JSON file
      (mkCommand currentSystem "write" "write to file" [] "echo ${json}" {})
      # opens the rendered JSON in the interactive `fx` viewer
      (mkCommand currentSystem "explore" "interactively explore" [pkgs.fx] (
        concatStringsSep "\t" ["fx" json]
      ) {})
    ];
  }

Functions

_:
/*
Use the Functions Blocktype for reusable Nix functions that are
called elsewhere in the code.

Modules and profiles of any kind also belong here, since they
are implemented as functions.

Consequently, this blocktype exposes no actions.
*/
name: {
  type = "functions";
  inherit name;
}

Anything

Note: while the implementation is the same as functions, the semantics are different. Implementations may diverge in the future.

_:
/*
Use the Anything Blocktype as a catch-all fallback.

It exposes no actions.
*/
name: {
  type = "anything";
  inherit name;
}

Kubectl

Block type for rendering deployment manifests for the Kubernetes Cluster scheduler. Each named attribute set under the block contains a set of deployment manifests.

{
  trivial,
  root,
  super,
  dmerge,
}:
/*
Use the `kubectl` Blocktype for rendering deployment manifests
for the Kubernetes Cluster scheduler. Each named attribute set under the
block contains a set of deployment manifests.

Available actions:
  - render
  - diff
  - apply
  - explore
*/
let
  inherit (root) mkCommand;
  inherit (super) addSelectorFunctor askUserToProceedSnippet postDiffToGitHubSnippet;
in
  name: {
    __functor = addSelectorFunctor;
    inherit name;
    type = "kubectl";

    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      inherit (inputs.nixpkgs) lib;
      pkgs = inputs.nixpkgs.${currentSystem};
      triv = trivial.${currentSystem};

      manifest_path = fragmentRelPath;

      # refuse to render from a dirty tree so every deployed manifest
      # can be traced back to a commit
      checkedRev = inputs.std.std.errors.bailOnDirty ''
        Will not render manifests from a dirty tree.
        Otherwise we cannot keep good track of deployment history.''
      inputs.self.rev;

      # a (k|K)ustomization entry switches diff/apply to `kubectl --kustomize`
      usesKustomize = target ? kustomization || target ? Kustomization;

      # post-process every manifest: normalize null labels, stamp
      # version/managed-by labels, and rewrite kustomization resources
      # to point at the preprocessed *.json files
      augment = let
        amendIfExists = path: rhs: manifest:
          if true == lib.hasAttrByPath path manifest
          then amendAlways rhs manifest
          else manifest;

        amendAlways = rhs: manifest: dmerge manifest rhs;
      in
        target:
          lib.mapAttrs (
            key:
              lib.flip lib.pipe [
                # metadata
                (
                  manifest:
                    if manifest ? metadata.labels && manifest.metadata.labels == null
                    then lib.recursiveUpdate manifest {metadata.labels = {};}
                    else manifest
                )
                (
                  amendIfExists ["metadata"]
                  {
                    metadata.labels."app.kubernetes.io/version" = checkedRev;
                    metadata.labels."app.kubernetes.io/managed-by" = "std-kubectl";
                  }
                )
                (
                  if usesKustomize && (key == "kustomization" || key == "Kustomization")
                  # ensure a kustomization picks up the preprocessed resources
                  then
                    (manifest:
                      manifest
                      // {
                        resources =
                          map
                          (n: "${n}.json")
                          (builtins.attrNames (builtins.removeAttrs target ["meta" "Kustomization" "kustomization"]));
                      })
                  else lib.id
                )
              ]
          ) (builtins.removeAttrs target ["meta"]);

      # writes each augmented manifest as JSON into one store directory
      generateManifests = target: let
        writeManifest = name: manifest:
          builtins.toFile name (builtins.unsafeDiscardStringContext (builtins.toJSON manifest));

        renderManifests = lib.mapAttrsToList (name: manifest: ''
          cp ${writeManifest name manifest} ${
            if name == "kustomization" || name == "Kustomization"
            then "Kustomization"
            else "${name}.json"
          }
        '');
      in
        triv.runCommandLocal "generate-k8s-manifests" {} ''
          mkdir -p $out
          cd $out
          ${lib.concatStrings (renderManifests (augment target))}
        '';

      # shell prelude shared by render/diff/apply: links the generated
      # manifests into $PRJ_DATA_HOME/<fragmentRelPath>
      build = ''
        declare manifest_path="$PRJ_DATA_HOME/${manifest_path}"
        build() {
          echo "Building manifests..."
          echo
          rm -rf "$manifest_path"
          mkdir -p "$(dirname "$manifest_path")"
          ln -s "${generateManifests target}" "$manifest_path"
          echo "Manifests built in: $manifest_path"
        }
      '';
    in [
      /*
      The `render` action will take this Nix manifest description, convert it to JSON,
      inject the git revision and validate the manifest, after which it can be run or
      planned with the kubectl cli or the `apply` action.
      */
      (mkCommand currentSystem "render" "Build the JSON manifests" [] ''
        ${build}
        build
      '' {})
      (mkCommand currentSystem "diff" "Diff the manifests against the cluster" [pkgs.kubectl pkgs.icdiff] ''
        ${build}
        build

        diff() {
          kubectl diff ${
          if usesKustomize
          then "--kustomize"
          else "--recursive --filename"
        } "$manifest_path/";
        }

        ${postDiffToGitHubSnippet "${fragmentRelPath}:diff" "$(diff || true)" "<code>std ${fragmentRelPath}:diff</code>"}

        KUBECTL_EXTERNAL_DIFF="icdiff -N -r"
        export KUBECTL_EXTERNAL_DIFF
        diff
      '' {})
      (mkCommand currentSystem "apply" "Apply the manifests to K8s" [pkgs.kubectl pkgs.icdiff] ''
        ${build}
        build

        KUBECTL_EXTERNAL_DIFF="icdiff -N -r"
        export KUBECTL_EXTERNAL_DIFF

        diff() {
          kubectl diff --server-side=true --field-manager="std-action-$(whoami)" ${
          if usesKustomize
          then "--kustomize"
          else "--recursive --filename"
        } "$manifest_path/";

          return $?;
        }

        run() {
          kubectl apply --server-side=true --field-manager="std-action-$(whoami)" ${
          if usesKustomize
          then "--kustomize"
          else "--recursive --filename"
        } "$manifest_path/";
        }

        diff
        ret=$?
        if [[ $ret == 0 ]] || [[ $ret == 1 ]]; then
          ${askUserToProceedSnippet "apply" "run"}
        fi
      '' {})
      (mkCommand currentSystem "explore" "Interactively explore the manifests" [pkgs.fx] ''
        fx ${
          builtins.toFile "explore-k8s-manifests.json"
          (builtins.unsafeDiscardStringContext (builtins.toJSON (augment target)))
        }
      '' {})
    ];
  }

Files (todo: vs data)

{root}:
/*
Use the Files Blocktype for any text data.

Available actions:
  - explore
*/
let
  inherit (root) mkCommand;
in
  name: {
    inherit name;
    type = "files";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      pkgs = inputs.nixpkgs.${currentSystem};
    in [
      # page through the file (coerced to its path) with bat
      (mkCommand currentSystem "explore" "interactively explore with bat" [pkgs.bat] ''
        bat ${toString target}
      '' {})
    ];
  }

Microvms

Block type for managing microvm.nix configuration for declaring lightweight NixOS virtual machines.

{root}:
/*
Use the Microvms Blocktype for Microvm.nix - https://github.com/astro/microvm.nix

Available actions:
  - run
  - console
  - microvm
*/
let
  inherit (root) mkCommand;
in
  name: {
    inherit name;
    type = "microvms";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      # resolve the runner package for the configured hypervisor once
      runner = target.config.microvm.runner.${target.config.microvm.hypervisor};
    in [
      (mkCommand currentSystem "run" "run the microvm" [] ''
        ${runner}/bin/microvm-run
      '' {})
      (mkCommand currentSystem "console" "enter the microvm console" [] ''
        ${runner}/bin/microvm-console
      '' {})
      (mkCommand currentSystem "microvm" "pass any command to microvm" [] ''
        ${runner}/bin/microvm-"$@"
      '' {})
    ];
  }

Namaka

Block type for declaring Namaka snapshot tests.

{
  root,
  super,
}: let
  inherit (root) mkCommand;
  inherit (super) addSelectorFunctor;
in
  name: {
    __functor = addSelectorFunctor;
    inherit name;
    type = "namaka";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      pkg = inputs.namaka.packages.${currentSystem}.default;
      subdir = target.snap-dir or "";
      # check / review / clean all shell out to the namaka CLI with the same
      # eval command; only the subcommand and the description differ
      namakaCmd = sub: desc:
        mkCommand currentSystem sub desc [pkg] ''
          namaka ${subdir} ${sub} -c nix eval '.#${fragment}'
        '' {};
    in [
      (mkCommand currentSystem "eval" "use transparently with namaka cli" [] ''
        nix eval '.#${fragment}'
      '' {})
      (namakaCmd "check" "run namaka tests against snapshots")
      (namakaCmd "review" "review pending namaka checks")
      (namakaCmd "clean" "clean up pending namaka checks")
    ];
  }

Nixostests

Block type for declaring VM-based tests for NixOS.

{
  root,
  super,
}:
/*
Use the NixosTests Blocktype in order to instrument NixOS
VM-based tests inside your repository.

Available actions:
  - run
  - run-vm
  - run-vm+
  - audit-script

On Linux hosts, additionally:
  - iptables+
  - iptables-
*/
let
  # fix: `actions` was also inherited here but never used by this blocktype
  inherit (root) mkCommand;
  inherit (super) addSelectorFunctor;
in
  name: {
    __functor = addSelectorFunctor;
    inherit name;
    type = "nixostests";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      pkgs = inputs.nixpkgs.${currentSystem};
      inherit (pkgs) lib;
      inherit (pkgs.stdenv) isLinux;
    in
      [
        (mkCommand currentSystem "run" "run tests in headless vm" [] ''
          # ${target.driver}
          ${target.driver}/bin/nixos-test-driver
        '' {})
        (mkCommand currentSystem "audit-script" "audit the test script" [pkgs.bat] ''
          # ${target.driver}
          bat --language py ${target.driver}/test-script
        '' {})
        (mkCommand currentSystem "run-vm" "run tests interactively in vm" [] ''
          # ${target.driverInteractive}
          ${target.driverInteractive}/bin/nixos-test-driver
        '' {})
        (mkCommand currentSystem "run-vm+" "run tests with state from last run" [] ''
          # ${target.driverInteractive}
          ${target.driverInteractive}/bin/nixos-test-driver --keep-vm-state
        '' {})
      ]
      # the iptables helpers only make sense on a Linux host
      ++ lib.optionals isLinux [
        (mkCommand currentSystem "iptables+" "setup nat redirect 80->8080 & 443->4433" [pkgs.iptables] ''
          sudo iptables \
            --table nat \
            --insert OUTPUT \
            --proto tcp \
            --destination 127.0.0.1 \
            --dport 443 \
            --jump REDIRECT \
            --to-ports 4433
          sudo iptables \
            --table nat \
            --insert OUTPUT \
            --proto tcp \
            --destination 127.0.0.1 \
            --dport 80 \
            --jump REDIRECT \
            --to-ports 8080
        '' {})
        (mkCommand currentSystem "iptables-" "remove nat redirect 80->8080 & 443->4433" [pkgs.iptables] ''
          sudo iptables \
            --table nat \
            --delete OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 443 -j REDIRECT --to-ports 4433
          sudo iptables \
            --table nat \
            --delete OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8080
        '' {})
      ];
  }

Nomad

Block type for rendering job descriptions for the Nomad Cluster scheduler.

{
  nixpkgs,
  root,
  super,
}:
/*
Use the `nomad` Block Type for rendering job descriptions
for the Nomad Cluster scheduler. Each named attribute set under the
block contains a valid Nomad job description, written in Nix.

Available actions:
  - render
  - deploy
  - explore
*/
let
  inherit (root) mkCommand;
  inherit (super) addSelectorFunctor askUserToProceedSnippet;
in
  name: {
    __functor = addSelectorFunctor;
    inherit name;
    type = "nomadJobManifests";

    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      inherit (nixpkgs) lib;
      pkgs = inputs.nixpkgs.${currentSystem};

      job_name = baseNameOf fragmentRelPath;
      job_path = "${dirOf fragmentRelPath}/${job_name}.json";

      # stamp each job with the (clean-tree) git revision so deployments
      # remain traceable; refuses to render from a dirty tree
      jobWithGitRevision = target: let
        checkedRev = inputs.std.std.errors.bailOnDirty ''
          Will not render jobs from a dirty tree.
          Otherwise we cannot keep good track of deployment history.''
        inputs.self.rev;
        job = builtins.mapAttrs (_: v: lib.recursiveUpdate v {meta.rev = checkedRev;}) target.job;
      in
        builtins.toFile "${job_name}.json" (builtins.unsafeDiscardStringContext (builtins.toJSON {inherit job;}));
      # shell prelude shared by all actions: links the rendered job into
      # $PRJ_DATA_HOME and validates it with `nomad validate`
      render = ''
        declare job_path="$PRJ_DATA_HOME/${job_path}"
        render() {
          echo "Rendering to $job_path..."
          rm -rf "$job_path"
          ln -s "${jobWithGitRevision target}" "$job_path"
          if status=$(nomad validate "$job_path"); then
            echo "$status for $job_path"
          fi
        }
      '';
    in [
      /*
      The `render` action will take this Nix job description, convert it to JSON,
      inject the git revision and validate the manifest, after which it can be run or
      planned with the Nomad cli or the `deploy` action.
      */
      (mkCommand currentSystem "render" "build the JSON job description" [pkgs.nomad] ''
        ${render}
        render
      '' {})
      (mkCommand currentSystem "deploy" "Deploy the job to Nomad" [pkgs.nomad pkgs.jq] ''
        ${render}
        render
        if ! plan_results=$(nomad plan -force-color "$job_path"); then
          echo "$plan_results"
          run() { echo "$plan_results" | grep 'nomad job run -check-index'; }
          ${askUserToProceedSnippet "deploy" "run"}
        else
          echo "Job hasn't changed since last deployment, nothing to deploy"
        fi
      '' {})
      (mkCommand currentSystem "explore" "interactively explore the Job definition" [pkgs.nomad pkgs.fx] ''
        ${render}
        render
        fx "$job_path"
      '' {})
    ];
  }

Nvfetcher

Block type for managing nvfetcher configuration for updating package definition sources.

{
  root,
  super,
}:
/*
Use the nvfetcher Blocktype in order to generate package sources
with nvfetcher. See its docs for more details.

Available actions:
  - fetch
*/
let
  # fix: `actions` was also inherited here but never used by this blocktype
  inherit (root) mkCommand;
  inherit (super) addSelectorFunctor;
in
  name: {
    __functor = addSelectorFunctor;
    inherit name;
    type = "nvfetcher";
    actions = {
      currentSystem,
      fragment,
      fragmentRelPath,
      target,
      inputs,
    }: let
      # fix: removed unused `lib` and `isLinux` bindings
      pkgs = inputs.nixpkgs.${currentSystem};
    in [
      # runs nvfetcher for exactly this target (via --filter) and appends
      # the timestamped changelog to <fragmentRelPath>.md in the repo
      (mkCommand currentSystem "fetch" "update source" [pkgs.nvfetcher] ''
         targetname="$(basename ${fragmentRelPath})"
         blockpath="$(dirname ${fragmentRelPath})"
         cellpath="$(dirname "$blockpath")"
         tmpfile="$(mktemp)"
         updates="$PRJ_ROOT/${fragmentRelPath}.md"
         nvfetcher \
           --config "$PRJ_ROOT/$cellpath/nvfetcher.toml" \
           --build-dir "$PRJ_ROOT/$blockpath" \
           --changelog "$tmpfile" \
           --filter "^$targetname$"

        sed -i '''' -e "s|^|- \`$(date --iso-8601=m)\` |" "$tmpfile"
        cat "$tmpfile" >> "$updates"
      '' {})
    ];
  }