reformat
This commit is contained in:
parent
857140bbd2
commit
44d265ecdb
|
@ -3,7 +3,8 @@
|
|||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
boot = lib.mkIf (!config.boot.isContainer) {
|
||||
initrd.systemd = {
|
||||
enable = true;
|
||||
|
@ -11,12 +12,23 @@
|
|||
extraBin.ip = "${pkgs.iproute}/bin/ip";
|
||||
extraBin.cryptsetup = "${pkgs.cryptsetup}/bin/cryptsetup";
|
||||
users.root.shell = "${pkgs.bashInteractive}/bin/bash";
|
||||
storePaths = ["${pkgs.bashInteractive}/bin/bash"];
|
||||
storePaths = [ "${pkgs.bashInteractive}/bin/bash" ];
|
||||
};
|
||||
|
||||
initrd.availableKernelModules = ["xhci_pci" "nvme" "r8169" "usb_storage" "usbhid" "sd_mod" "rtsx_pci_sdmmc" "ahci" "uas" "tpm_crb"];
|
||||
supportedFilesystems = ["ntfs"];
|
||||
kernelModules = ["kvm-intel"];
|
||||
initrd.availableKernelModules = [
|
||||
"xhci_pci"
|
||||
"nvme"
|
||||
"r8169"
|
||||
"usb_storage"
|
||||
"usbhid"
|
||||
"sd_mod"
|
||||
"rtsx_pci_sdmmc"
|
||||
"ahci"
|
||||
"uas"
|
||||
"tpm_crb"
|
||||
];
|
||||
supportedFilesystems = [ "ntfs" ];
|
||||
kernelModules = [ "kvm-intel" ];
|
||||
kernelParams = [
|
||||
"rd.luks.options=timeout=0"
|
||||
"rootflags=x-systemd.device-timeout=0"
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{ inputs, lib, ... }:
|
||||
{
|
||||
inputs,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
./boot.nix
|
||||
./home-manager.nix
|
||||
|
@ -37,6 +34,6 @@
|
|||
inputs.nixos-nftables-firewall.nixosModules.default
|
||||
inputs.nixvim.nixosModules.nixvim
|
||||
];
|
||||
age.identityPaths = ["/state/etc/ssh/ssh_host_ed25519_key"];
|
||||
age.identityPaths = [ "/state/etc/ssh/ssh_host_ed25519_key" ];
|
||||
boot.mode = lib.mkDefault "efi";
|
||||
}
|
||||
|
|
|
@ -4,7 +4,8 @@
|
|||
pkgs,
|
||||
nodes,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
../../modules-hm/impermanence.nix
|
||||
../../modules-hm/images.nix
|
||||
|
@ -18,9 +19,7 @@
|
|||
spicePkgs = inputs.spicetify-nix.legacyPackages.${pkgs.system};
|
||||
};
|
||||
sharedModules = [
|
||||
{
|
||||
home.stateVersion = stateVersion;
|
||||
}
|
||||
{ home.stateVersion = stateVersion; }
|
||||
inputs.nix-index-database.hmModules.nix-index
|
||||
inputs.nixos-extra-modules.homeManagerModules.default
|
||||
inputs.nixvim.homeManagerModules.nixvim
|
||||
|
@ -38,5 +37,5 @@
|
|||
|
||||
# But still link all completions from all packages so they
|
||||
# can be found by zsh
|
||||
environment.pathsToLink = ["/share/zsh"];
|
||||
environment.pathsToLink = [ "/share/zsh" ];
|
||||
}
|
||||
|
|
|
@ -3,35 +3,23 @@
|
|||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
onlyHost =
|
||||
lib.mkIf (!config.boot.isContainer);
|
||||
prune = folder:
|
||||
pkgs.writers.writePython3Bin "impermanence-prune" {} ''
|
||||
}:
|
||||
let
|
||||
onlyHost = lib.mkIf (!config.boot.isContainer);
|
||||
prune =
|
||||
folder:
|
||||
pkgs.writers.writePython3Bin "impermanence-prune" { } ''
|
||||
import os
|
||||
import sys
|
||||
mounts = [${
|
||||
lib.concatStringsSep ", "
|
||||
((map (x:
|
||||
"\""
|
||||
+ (
|
||||
if x.home != null
|
||||
then x.home + "/"
|
||||
else ""
|
||||
)
|
||||
+ x.directory
|
||||
+ "\"")
|
||||
config.environment.persistence.${folder}.directories)
|
||||
++ (map (x:
|
||||
"\""
|
||||
+ (
|
||||
if x.home != null
|
||||
then x.home + "/"
|
||||
else ""
|
||||
)
|
||||
+ x.file
|
||||
+ "\"")
|
||||
config.environment.persistence.${folder}.files))
|
||||
lib.concatStringsSep ", " (
|
||||
(map (
|
||||
x: "\"" + (if x.home != null then x.home + "/" else "") + x.directory + "\""
|
||||
) config.environment.persistence.${folder}.directories)
|
||||
++ (map (
|
||||
x: "\"" + (if x.home != null then x.home + "/" else "") + x.file + "\""
|
||||
) config.environment.persistence.${folder}.files)
|
||||
)
|
||||
}] # noqa: E501
|
||||
mounts = [os.path.normpath(x) for x in mounts]
|
||||
mounts.sort()
|
||||
|
@ -53,11 +41,10 @@
|
|||
file=sys.stderr)
|
||||
print("\n".join(erg))
|
||||
'';
|
||||
in {
|
||||
in
|
||||
{
|
||||
# to allow all users to access hm managed persistent folders
|
||||
lib.scripts.impermanence.pruneScripts =
|
||||
lib.mapAttrs (k: _: prune k)
|
||||
config.environment.persistence;
|
||||
lib.scripts.impermanence.pruneScripts = lib.mapAttrs (k: _: prune k) config.environment.persistence;
|
||||
programs.fuse.userAllowOther = true;
|
||||
services.openssh.hostKeys = lib.mkForce [
|
||||
{
|
||||
|
@ -68,15 +55,10 @@ in {
|
|||
environment.persistence."/state" = {
|
||||
hideMounts = true;
|
||||
|
||||
files =
|
||||
[
|
||||
"/etc/ssh/ssh_host_ed25519_key"
|
||||
"/etc/ssh/ssh_host_ed25519_key.pub"
|
||||
]
|
||||
++ lib.lists.optionals (!config.boot.isContainer)
|
||||
[
|
||||
"/etc/machine-id"
|
||||
];
|
||||
files = [
|
||||
"/etc/ssh/ssh_host_ed25519_key"
|
||||
"/etc/ssh/ssh_host_ed25519_key.pub"
|
||||
] ++ lib.lists.optionals (!config.boot.isContainer) [ "/etc/machine-id" ];
|
||||
directories = [
|
||||
"/var/log"
|
||||
"/var/lib/systemd"
|
||||
|
@ -93,22 +75,20 @@ in {
|
|||
};
|
||||
environment.persistence."/persist" = {
|
||||
hideMounts = true;
|
||||
directories = [];
|
||||
directories = [ ];
|
||||
};
|
||||
fileSystems."/persist".neededForBoot = true;
|
||||
fileSystems."/state".neededForBoot = true;
|
||||
|
||||
# After importing the rpool, rollback the root system to be empty.
|
||||
boot.initrd.systemd.services.impermanence-root =
|
||||
onlyHost
|
||||
{
|
||||
wantedBy = ["initrd.target"];
|
||||
after = ["zfs-import-rpool.service"];
|
||||
before = ["sysroot.mount"];
|
||||
unitConfig.DefaultDependencies = "no";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
|
||||
};
|
||||
boot.initrd.systemd.services.impermanence-root = onlyHost {
|
||||
wantedBy = [ "initrd.target" ];
|
||||
after = [ "zfs-import-rpool.service" ];
|
||||
before = [ "sysroot.mount" ];
|
||||
unitConfig.DefaultDependencies = "no";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,17 +1,13 @@
|
|||
{ lib, config, ... }:
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
networking = {
|
||||
useNetworkd = true;
|
||||
dhcpcd.enable = false;
|
||||
useDHCP = false;
|
||||
# allow mdns port
|
||||
firewall.allowedUDPPorts = [5353];
|
||||
firewall.allowedUDPPorts = [ 5353 ];
|
||||
renameInterfacesByMac = lib.mkIf (!config.boot.isContainer) (
|
||||
lib.mapAttrs (_: v: v.mac)
|
||||
(config.secrets.secrets.local.networking.interfaces or {})
|
||||
lib.mapAttrs (_: v: v.mac) (config.secrets.secrets.local.networking.interfaces or { })
|
||||
);
|
||||
};
|
||||
systemd.network = {
|
||||
|
@ -19,8 +15,8 @@
|
|||
wait-online.anyInterface = true;
|
||||
};
|
||||
system.nssDatabases.hosts = lib.mkMerge [
|
||||
(lib.mkBefore ["mdns_minimal [NOTFOUND=return]"])
|
||||
(lib.mkAfter ["mdns"])
|
||||
(lib.mkBefore [ "mdns_minimal [NOTFOUND=return]" ])
|
||||
(lib.mkAfter [ "mdns" ])
|
||||
];
|
||||
services.resolved = {
|
||||
enable = true;
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
networking.nftables = {
|
||||
stopRuleset = lib.mkDefault ''
|
||||
table inet filter {
|
||||
|
@ -36,20 +33,31 @@
|
|||
nnf-ssh.enable = true;
|
||||
nnf-icmp = {
|
||||
enable = true;
|
||||
ipv6Types = ["echo-request" "destination-unreachable" "packet-too-big" "time-exceeded" "parameter-problem" "nd-router-advert" "nd-neighbor-solicit" "nd-neighbor-advert"];
|
||||
ipv4Types = ["echo-request" "destination-unreachable" "router-advertisement" "time-exceeded" "parameter-problem"];
|
||||
ipv6Types = [
|
||||
"echo-request"
|
||||
"destination-unreachable"
|
||||
"packet-too-big"
|
||||
"time-exceeded"
|
||||
"parameter-problem"
|
||||
"nd-router-advert"
|
||||
"nd-neighbor-solicit"
|
||||
"nd-neighbor-advert"
|
||||
];
|
||||
ipv4Types = [
|
||||
"echo-request"
|
||||
"destination-unreachable"
|
||||
"router-advertisement"
|
||||
"time-exceeded"
|
||||
"parameter-problem"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
rules.untrusted-to-local = {
|
||||
from = ["untrusted"];
|
||||
to = ["local"];
|
||||
from = [ "untrusted" ];
|
||||
to = [ "local" ];
|
||||
|
||||
inherit
|
||||
(config.networking.firewall)
|
||||
allowedTCPPorts
|
||||
allowedUDPPorts
|
||||
;
|
||||
inherit (config.networking.firewall) allowedTCPPorts allowedUDPPorts;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -1,14 +1,15 @@
|
|||
{ inputs, stateVersion, ... }:
|
||||
{
|
||||
inputs,
|
||||
stateVersion,
|
||||
...
|
||||
}: {
|
||||
nix = {
|
||||
settings = {
|
||||
auto-optimise-store = true;
|
||||
allowed-users = ["@wheel"];
|
||||
trusted-users = ["root"];
|
||||
system-features = ["recursive-nix" "repl-flake" "big-parallel"];
|
||||
allowed-users = [ "@wheel" ];
|
||||
trusted-users = [ "root" ];
|
||||
system-features = [
|
||||
"recursive-nix"
|
||||
"repl-flake"
|
||||
"big-parallel"
|
||||
];
|
||||
substituters = [
|
||||
"https://nix-community.cachix.org"
|
||||
"https://cache.nixos.org"
|
||||
|
@ -24,7 +25,7 @@
|
|||
cores = 0;
|
||||
max-jobs = "auto";
|
||||
# make agenix rekey find the secrets even without trusted user
|
||||
extra-sandbox-paths = ["/var/tmp/agenix-rekey?"];
|
||||
extra-sandbox-paths = [ "/var/tmp/agenix-rekey?" ];
|
||||
};
|
||||
daemonCPUSchedPolicy = "batch";
|
||||
daemonIOSchedPriority = 5;
|
||||
|
@ -34,7 +35,7 @@
|
|||
experimental-features = nix-command flakes recursive-nix
|
||||
flake-registry = /etc/nix/registry.json
|
||||
'';
|
||||
nixPath = ["nixpkgs=/run/current-system/nixpkgs"];
|
||||
nixPath = [ "nixpkgs=/run/current-system/nixpkgs" ];
|
||||
optimise.automatic = true;
|
||||
gc = {
|
||||
automatic = true;
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{lib, ...}: {
|
||||
{ lib, ... }:
|
||||
{
|
||||
# Enable the OpenSSH daemon.
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
|
|
|
@ -5,24 +5,24 @@
|
|||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
system.stateVersion = stateVersion;
|
||||
|
||||
age.rekey = {
|
||||
inherit
|
||||
(inputs.self.secretsConfig)
|
||||
masterIdentities
|
||||
extraEncryptionPubkeys
|
||||
;
|
||||
inherit (inputs.self.secretsConfig) masterIdentities extraEncryptionPubkeys;
|
||||
|
||||
storageMode = "derivation";
|
||||
|
||||
forceRekeyOnSystem = builtins.extraBuiltins.unsafeCurrentSystem;
|
||||
hostPubkey = let
|
||||
pubkeyPath = config.node.secretsDir + "/host.pub";
|
||||
in
|
||||
lib.mkIf (lib.pathExists pubkeyPath || lib.trace "Missing pubkey for ${config.node.name}: ${toString pubkeyPath} not found, using dummy replacement key for now." false)
|
||||
pubkeyPath;
|
||||
hostPubkey =
|
||||
let
|
||||
pubkeyPath = config.node.secretsDir + "/host.pub";
|
||||
in
|
||||
lib.mkIf (
|
||||
lib.pathExists pubkeyPath
|
||||
|| lib.trace "Missing pubkey for ${config.node.name}: ${toString pubkeyPath} not found, using dummy replacement key for now." false
|
||||
) pubkeyPath;
|
||||
generatedSecretsDir = config.node.secretsDir + "/generated/";
|
||||
cacheDir = "/var/tmp/agenix-rekey/\"$UID\"";
|
||||
};
|
||||
|
@ -38,16 +38,16 @@
|
|||
# to create a link called /run/agenix. Agenix should probably fail in this case,
|
||||
# but doesn't and instead puts the generation link into the existing directory.
|
||||
# TODO See https://github.com/ryantm/agenix/pull/187.
|
||||
system.activationScripts = lib.mkIf (config.age.secrets != {}) {
|
||||
system.activationScripts = lib.mkIf (config.age.secrets != { }) {
|
||||
removeAgenixLink.text = "[[ ! -L /run/agenix ]] && [[ -d /run/agenix ]] && rm -rf /run/agenix";
|
||||
agenixNewGeneration.deps = ["removeAgenixLink"];
|
||||
agenixNewGeneration.deps = [ "removeAgenixLink" ];
|
||||
};
|
||||
|
||||
time.timeZone = lib.mkDefault "Europe/Berlin";
|
||||
i18n.defaultLocale = "C.UTF-8";
|
||||
console = {
|
||||
font = "${pkgs.terminus_font}/share/consolefonts/ter-v28n.psf.gz";
|
||||
packages = with pkgs; [terminus_font];
|
||||
packages = with pkgs; [ terminus_font ];
|
||||
useXkbConfig = true; # use xkbOptions in tty.
|
||||
keyMap = lib.mkDefault "de-latin1-nodeadkeys";
|
||||
};
|
||||
|
@ -71,11 +71,12 @@
|
|||
|
||||
powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
|
||||
|
||||
secrets.secretFiles = let
|
||||
local = config.node.secretsDir + "/secrets.nix.age";
|
||||
in
|
||||
secrets.secretFiles =
|
||||
let
|
||||
local = config.node.secretsDir + "/secrets.nix.age";
|
||||
in
|
||||
{
|
||||
global = ../../secrets/secrets.nix.age;
|
||||
}
|
||||
// lib.optionalAttrs (config.node.name != null && lib.pathExists local) {inherit local;};
|
||||
// lib.optionalAttrs (config.node.name != null && lib.pathExists local) { inherit local; };
|
||||
}
|
||||
|
|
|
@ -1,49 +1,51 @@
|
|||
{
|
||||
users.mutableUsers = false;
|
||||
users.deterministicIds = let
|
||||
uidGid = id: {
|
||||
uid = id;
|
||||
gid = id;
|
||||
users.deterministicIds =
|
||||
let
|
||||
uidGid = id: {
|
||||
uid = id;
|
||||
gid = id;
|
||||
};
|
||||
in
|
||||
{
|
||||
nscd = uidGid 201;
|
||||
sshd = uidGid 202;
|
||||
tss = uidGid 203;
|
||||
rtkit = uidGid 204;
|
||||
nixseparatedebuginfod = uidGid 205;
|
||||
wireshark = uidGid 206;
|
||||
polkituser = uidGid 207;
|
||||
msr = uidGid 208;
|
||||
avahi = uidGid 209;
|
||||
fwupd-refresh = uidGid 210;
|
||||
podman = uidGid 211;
|
||||
acme = uidGid 212;
|
||||
nextcloud = uidGid 213;
|
||||
redis-nextcloud = uidGid 214;
|
||||
radicale = uidGid 215;
|
||||
git = uidGid 215;
|
||||
vaultwarden = uidGid 215;
|
||||
redis-paperless = uidGid 216;
|
||||
microvm = uidGid 217;
|
||||
maddy = uidGid 218;
|
||||
tt_rss = uidGid 219;
|
||||
freshrss = uidGid 220;
|
||||
mongodb = uidGid 221;
|
||||
authelia-main = uidGid 222;
|
||||
kanidm = uidGid 223;
|
||||
oauth2-proxy = uidGid 224;
|
||||
influxdb2 = uidGid 225;
|
||||
firefly-iii = uidGid 226;
|
||||
paperless = uidGid 315;
|
||||
systemd-oom = uidGid 300;
|
||||
systemd-coredump = uidGid 301;
|
||||
patrick = uidGid 1000;
|
||||
smb = uidGid 2000;
|
||||
david = uidGid 2004;
|
||||
helen = uidGid 2001;
|
||||
ggr = uidGid 2002;
|
||||
family = uidGid 2003;
|
||||
printer = uidGid 2005;
|
||||
pr-tracker = uidGid 2006;
|
||||
};
|
||||
in {
|
||||
nscd = uidGid 201;
|
||||
sshd = uidGid 202;
|
||||
tss = uidGid 203;
|
||||
rtkit = uidGid 204;
|
||||
nixseparatedebuginfod = uidGid 205;
|
||||
wireshark = uidGid 206;
|
||||
polkituser = uidGid 207;
|
||||
msr = uidGid 208;
|
||||
avahi = uidGid 209;
|
||||
fwupd-refresh = uidGid 210;
|
||||
podman = uidGid 211;
|
||||
acme = uidGid 212;
|
||||
nextcloud = uidGid 213;
|
||||
redis-nextcloud = uidGid 214;
|
||||
radicale = uidGid 215;
|
||||
git = uidGid 215;
|
||||
vaultwarden = uidGid 215;
|
||||
redis-paperless = uidGid 216;
|
||||
microvm = uidGid 217;
|
||||
maddy = uidGid 218;
|
||||
tt_rss = uidGid 219;
|
||||
freshrss = uidGid 220;
|
||||
mongodb = uidGid 221;
|
||||
authelia-main = uidGid 222;
|
||||
kanidm = uidGid 223;
|
||||
oauth2-proxy = uidGid 224;
|
||||
influxdb2 = uidGid 225;
|
||||
firefly-iii = uidGid 226;
|
||||
paperless = uidGid 315;
|
||||
systemd-oom = uidGid 300;
|
||||
systemd-coredump = uidGid 301;
|
||||
patrick = uidGid 1000;
|
||||
smb = uidGid 2000;
|
||||
david = uidGid 2004;
|
||||
helen = uidGid 2001;
|
||||
ggr = uidGid 2002;
|
||||
family = uidGid 2003;
|
||||
printer = uidGid 2005;
|
||||
pr-tracker = uidGid 2006;
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
{pkgs, ...}: {
|
||||
environment.systemPackages = with pkgs; [bluetuith];
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [ bluetuith ];
|
||||
|
||||
hardware.bluetooth = {
|
||||
enable = true;
|
||||
powerOnBoot = false;
|
||||
disabledPlugins = ["sap"];
|
||||
disabledPlugins = [ "sap" ];
|
||||
settings = {
|
||||
General = {
|
||||
FastConnectable = "true";
|
||||
|
@ -16,15 +17,13 @@
|
|||
};
|
||||
|
||||
hardware.pulseaudio = {
|
||||
package = pkgs.pulseaudio.override {bluetoothSupport = true;};
|
||||
package = pkgs.pulseaudio.override { bluetoothSupport = true; };
|
||||
extraConfig = ''
|
||||
load-module module-bluetooth-discover
|
||||
load-module module-bluetooth-policy
|
||||
load-module module-switch-on-connect
|
||||
'';
|
||||
extraModules = with pkgs; [pulseaudio-modules-bt];
|
||||
extraModules = with pkgs; [ pulseaudio-modules-bt ];
|
||||
};
|
||||
environment.persistence."/state".directories = [
|
||||
"/var/lib/bluetooth"
|
||||
];
|
||||
environment.persistence."/state".directories = [ "/var/lib/bluetooth" ];
|
||||
}
|
||||
|
|
|
@ -1,3 +1 @@
|
|||
{
|
||||
services.joycond.enable = true;
|
||||
}
|
||||
{ services.joycond.enable = true; }
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
...
|
||||
}:
|
||||
lib.optionalAttrs (!minimal) {
|
||||
services.xserver.videoDrivers = lib.mkForce ["nvidia"];
|
||||
services.xserver.videoDrivers = lib.mkForce [ "nvidia" ];
|
||||
|
||||
hardware = {
|
||||
graphics = {
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
# Configuration for actual physical machines
|
||||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
hardware = {
|
||||
enableRedistributableFirmware = true;
|
||||
enableAllFirmware = true;
|
||||
|
@ -8,6 +9,6 @@
|
|||
services = {
|
||||
fwupd.enable = true;
|
||||
smartd.enable = true;
|
||||
thermald.enable = builtins.elem config.nixpkgs.hostPlatform.system ["x86_64-linux"];
|
||||
thermald.enable = builtins.elem config.nixpkgs.hostPlatform.system [ "x86_64-linux" ];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -13,7 +13,10 @@ lib.optionalAttrs (!minimal) {
|
|||
# packages = pkgs.linuxPackages_6_6_rt;
|
||||
# };
|
||||
#};
|
||||
environment.systemPackages = with pkgs; [pulseaudio pulsemixer];
|
||||
environment.systemPackages = with pkgs; [
|
||||
pulseaudio
|
||||
pulsemixer
|
||||
];
|
||||
|
||||
hardware.pulseaudio.enable = lib.mkForce false;
|
||||
security.rtkit.enable = true;
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
yubikey-personalization
|
||||
yubikey-manager
|
||||
|
@ -7,5 +8,8 @@
|
|||
|
||||
services.pcscd.enable = true;
|
||||
|
||||
services.udev.packages = with pkgs; [yubikey-personalization libu2f-host];
|
||||
services.udev.packages = with pkgs; [
|
||||
yubikey-personalization
|
||||
libu2f-host
|
||||
];
|
||||
}
|
||||
|
|
|
@ -4,13 +4,11 @@
|
|||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
mkOption
|
||||
types
|
||||
;
|
||||
in {
|
||||
}:
|
||||
let
|
||||
inherit (lib) mkOption types;
|
||||
in
|
||||
{
|
||||
options.hidpi = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
|
@ -18,14 +16,10 @@ in {
|
|||
};
|
||||
|
||||
# stylix acceses stylix options on import meaning you can only import this module when you're actually setting stylix options
|
||||
imports = [
|
||||
inputs.stylix.nixosModules.stylix
|
||||
];
|
||||
imports = [ inputs.stylix.nixosModules.stylix ];
|
||||
|
||||
config = {
|
||||
environment.systemPackages = with pkgs; [
|
||||
xdg-utils
|
||||
];
|
||||
environment.systemPackages = with pkgs; [ xdg-utils ];
|
||||
xdg.portal = {
|
||||
xdgOpenUsePortal = true;
|
||||
enable = true;
|
||||
|
@ -38,13 +32,11 @@ in {
|
|||
"gtk"
|
||||
"hyprland"
|
||||
];
|
||||
sway.default = [
|
||||
"wlr"
|
||||
];
|
||||
sway.default = [ "wlr" ];
|
||||
};
|
||||
};
|
||||
# needed for gnome pinentry
|
||||
services.dbus.packages = [pkgs.gcr];
|
||||
services.dbus.packages = [ pkgs.gcr ];
|
||||
fonts = {
|
||||
enableGhostscriptFonts = false;
|
||||
fontDir.enable = false;
|
||||
|
@ -75,7 +67,7 @@ in {
|
|||
'';
|
||||
};
|
||||
packages = with pkgs; [
|
||||
(nerdfonts.override {fonts = ["FiraCode"];})
|
||||
(nerdfonts.override { fonts = [ "FiraCode" ]; })
|
||||
ibm-plex
|
||||
dejavu_fonts
|
||||
unifont
|
||||
|
@ -160,71 +152,73 @@ in {
|
|||
};
|
||||
|
||||
home-manager.sharedModules = [
|
||||
({
|
||||
pkgs,
|
||||
config,
|
||||
nixosConfig,
|
||||
...
|
||||
}: {
|
||||
stylix = {
|
||||
cursor = {
|
||||
package = pkgs.openzone-cursors;
|
||||
name = "OpenZone_White_Slim";
|
||||
size =
|
||||
if nixosConfig.hidpi
|
||||
then 48
|
||||
else 18;
|
||||
};
|
||||
inherit (nixosConfig.stylix) polarity;
|
||||
targets = {
|
||||
gtk.enable = true;
|
||||
bat.enable = true;
|
||||
dunst.enable = true;
|
||||
zathura.enable = true;
|
||||
xresources.enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
xresources.properties = {
|
||||
"Xft.hinting" = true;
|
||||
"Xft.antialias" = true;
|
||||
"Xft.autohint" = false;
|
||||
"Xft.lcdfilter" = "lcddefault";
|
||||
"Xft.hintstyle" = "hintfull";
|
||||
"Xft.rgba" = "rgb";
|
||||
};
|
||||
|
||||
gtk = let
|
||||
gtk34extraConfig = {
|
||||
gtk-application-prefer-dark-theme = 1;
|
||||
gtk-cursor-theme-size = 18;
|
||||
gtk-enable-animations = true;
|
||||
gtk-xft-antialias = 1;
|
||||
gtk-xft-dpi = 96; # XXX: delete for wayland?
|
||||
gtk-xft-hinting = 1;
|
||||
gtk-xft-hintstyle = "hintfull";
|
||||
gtk-xft-rgba = "rgb";
|
||||
};
|
||||
in {
|
||||
enable = true;
|
||||
iconTheme = {
|
||||
name = "Vimix-Doder";
|
||||
package = pkgs.vimix-icon-theme;
|
||||
(
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
nixosConfig,
|
||||
...
|
||||
}:
|
||||
{
|
||||
stylix = {
|
||||
cursor = {
|
||||
package = pkgs.openzone-cursors;
|
||||
name = "OpenZone_White_Slim";
|
||||
size = if nixosConfig.hidpi then 48 else 18;
|
||||
};
|
||||
inherit (nixosConfig.stylix) polarity;
|
||||
targets = {
|
||||
gtk.enable = true;
|
||||
bat.enable = true;
|
||||
dunst.enable = true;
|
||||
zathura.enable = true;
|
||||
xresources.enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
gtk2.extraConfig = "gtk-application-prefer-dark-theme = true";
|
||||
gtk3.extraConfig = gtk34extraConfig;
|
||||
gtk4.extraConfig = gtk34extraConfig;
|
||||
};
|
||||
xresources.properties = {
|
||||
"Xft.hinting" = true;
|
||||
"Xft.antialias" = true;
|
||||
"Xft.autohint" = false;
|
||||
"Xft.lcdfilter" = "lcddefault";
|
||||
"Xft.hintstyle" = "hintfull";
|
||||
"Xft.rgba" = "rgb";
|
||||
};
|
||||
|
||||
home.sessionVariables.GTK_THEME = config.gtk.theme.name;
|
||||
gtk =
|
||||
let
|
||||
gtk34extraConfig = {
|
||||
gtk-application-prefer-dark-theme = 1;
|
||||
gtk-cursor-theme-size = 18;
|
||||
gtk-enable-animations = true;
|
||||
gtk-xft-antialias = 1;
|
||||
gtk-xft-dpi = 96; # XXX: delete for wayland?
|
||||
gtk-xft-hinting = 1;
|
||||
gtk-xft-hintstyle = "hintfull";
|
||||
gtk-xft-rgba = "rgb";
|
||||
};
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
iconTheme = {
|
||||
name = "Vimix-Doder";
|
||||
package = pkgs.vimix-icon-theme;
|
||||
};
|
||||
|
||||
qt = {
|
||||
enable = true;
|
||||
platformTheme.name = "adwaita";
|
||||
style.name = "Adwaita-Dark";
|
||||
};
|
||||
})
|
||||
gtk2.extraConfig = "gtk-application-prefer-dark-theme = true";
|
||||
gtk3.extraConfig = gtk34extraConfig;
|
||||
gtk4.extraConfig = gtk34extraConfig;
|
||||
};
|
||||
|
||||
home.sessionVariables.GTK_THEME = config.gtk.theme.name;
|
||||
|
||||
qt = {
|
||||
enable = true;
|
||||
platformTheme.name = "adwaita";
|
||||
style.name = "Adwaita-Dark";
|
||||
};
|
||||
}
|
||||
)
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{ config, pkgs, ... }:
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
age.secrets.initrd_host_ed25519_key.generator.script = "ssh-ed25519";
|
||||
|
||||
boot.initrd.network.enable = true;
|
||||
|
@ -14,7 +11,7 @@
|
|||
# need two activations to change as well as that to enable this
|
||||
# module you need to set hostKeys to a dummy value and generate
|
||||
# and invalid initrd once
|
||||
hostKeys = [config.age.secrets.initrd_host_ed25519_key.path];
|
||||
hostKeys = [ config.age.secrets.initrd_host_ed25519_key.path ];
|
||||
};
|
||||
|
||||
# Make sure that there is always a valid initrd hostkey available that can be installed into
|
||||
|
@ -30,7 +27,10 @@
|
|||
${pkgs.openssh}/bin/ssh-keygen -t ed25519 -N "" -f "${config.age.secrets.initrd_host_ed25519_key.path}"
|
||||
fi
|
||||
'';
|
||||
deps = ["agenixInstall" "users"];
|
||||
deps = [
|
||||
"agenixInstall"
|
||||
"users"
|
||||
];
|
||||
};
|
||||
system.activationScripts.agenixChown.deps = ["agenixEnsureInitrdHostkey"];
|
||||
system.activationScripts.agenixChown.deps = [ "agenixEnsureInitrdHostkey" ];
|
||||
}
|
||||
|
|
|
@ -1,7 +1,11 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
services.printing = {
|
||||
enable = true;
|
||||
drivers = [pkgs.hplipWithPlugin pkgs.hplip];
|
||||
drivers = [
|
||||
pkgs.hplipWithPlugin
|
||||
pkgs.hplip
|
||||
];
|
||||
};
|
||||
environment.persistence."/state".directories = [
|
||||
{
|
||||
|
|
|
@ -8,10 +8,7 @@
|
|||
lib.optionalAttrs (!minimal) {
|
||||
environment.systemPackages = [
|
||||
# For debugging and troubleshooting Secure Boot.
|
||||
(pkgs.sbctl.override
|
||||
{
|
||||
databasePath = "/run/secureboot";
|
||||
})
|
||||
(pkgs.sbctl.override { databasePath = "/run/secureboot"; })
|
||||
];
|
||||
age.secrets.secureboot.rekeyFile = ../../hosts/${config.node.name}/secrets/secureboot.tar.age;
|
||||
system.activationScripts.securebootuntar = {
|
||||
|
@ -21,7 +18,7 @@ lib.optionalAttrs (!minimal) {
|
|||
chmod 700 /run/secureboot
|
||||
${pkgs.gnutar}/bin/tar xf ${config.age.secrets.secureboot.path} -C /run/secureboot || true
|
||||
'';
|
||||
deps = ["agenix"];
|
||||
deps = [ "agenix" ];
|
||||
};
|
||||
|
||||
# Lanzaboote currently replaces the systemd-boot module.
|
||||
|
|
|
@ -8,8 +8,8 @@ lib.optionalAttrs (!minimal) {
|
|||
programs.steam = {
|
||||
enable = true;
|
||||
package = pkgs.steam.override {
|
||||
extraPkgs = pkgs:
|
||||
with pkgs; [
|
||||
extraPkgs =
|
||||
pkgs: with pkgs; [
|
||||
# vampir überlebende braucht diese pkgs
|
||||
libgdiplus
|
||||
cups
|
||||
|
|
|
@ -14,10 +14,10 @@ lib.optionalAttrs (!minimal) {
|
|||
enable = true;
|
||||
xdgOpenUsePortal = true;
|
||||
config.common = {
|
||||
"org.freedesktop.impl.portal.Secret" = ["gnome-keyring"];
|
||||
"org.freedesktop.impl.portal.ScreenCast" = ["hyprland"];
|
||||
"org.freedesktop.impl.portal.Screenshot" = ["hyprland"];
|
||||
"org.freedesktop.portal.FileChooser" = ["xdg-desktop-portal-gtk"];
|
||||
"org.freedesktop.impl.portal.Secret" = [ "gnome-keyring" ];
|
||||
"org.freedesktop.impl.portal.ScreenCast" = [ "hyprland" ];
|
||||
"org.freedesktop.impl.portal.Screenshot" = [ "hyprland" ];
|
||||
"org.freedesktop.portal.FileChooser" = [ "xdg-desktop-portal-gtk" ];
|
||||
};
|
||||
extraPortals = [
|
||||
pkgs.xdg-desktop-portal-hyprland
|
||||
|
|
|
@ -12,7 +12,7 @@ lib.optionalAttrs (!minimal) {
|
|||
displayManager.startx.enable = true;
|
||||
autoRepeatDelay = 235;
|
||||
autoRepeatInterval = 60;
|
||||
videoDrivers = ["modesetting"];
|
||||
videoDrivers = [ "modesetting" ];
|
||||
};
|
||||
services.libinput = {
|
||||
enable = true;
|
||||
|
@ -28,10 +28,9 @@ lib.optionalAttrs (!minimal) {
|
|||
disableWhileTyping = true;
|
||||
};
|
||||
};
|
||||
services.udev.extraRules = let
|
||||
exe =
|
||||
pkgs.writeShellScript "set-key-repeat"
|
||||
''
|
||||
services.udev.extraRules =
|
||||
let
|
||||
exe = pkgs.writeShellScript "set-key-repeat" ''
|
||||
if [ -d "/tmp/.X11-unix" ]; then
|
||||
for D in /tmp/.X11-unix/*; do
|
||||
file=$(${pkgs.coreutils}/bin/basename $D)
|
||||
|
@ -43,7 +42,8 @@ lib.optionalAttrs (!minimal) {
|
|||
done
|
||||
fi
|
||||
'';
|
||||
in ''
|
||||
ACTION=="add", SUBSYSTEM=="input", ATTRS{bInterfaceClass}=="03", RUN+="${exe}"
|
||||
'';
|
||||
in
|
||||
''
|
||||
ACTION=="add", SUBSYSTEM=="input", ATTRS{bInterfaceClass}=="03", RUN+="${exe}"
|
||||
'';
|
||||
}
|
||||
|
|
|
@ -3,15 +3,16 @@
|
|||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
boot.supportedFilesystems = ["zfs"];
|
||||
}:
|
||||
{
|
||||
boot.supportedFilesystems = [ "zfs" ];
|
||||
boot.kernelPackages = lib.mkDefault config.boot.zfs.package.latestCompatibleLinuxPackages;
|
||||
|
||||
# The root pool should never be imported forcefully.
|
||||
# Failure to import is important to notice!
|
||||
boot.zfs.forceImportRoot = false;
|
||||
|
||||
environment.systemPackages = with pkgs; [zfs];
|
||||
environment.systemPackages = with pkgs; [ zfs ];
|
||||
|
||||
# Might help with hangs mainly atuin
|
||||
#boot.kernelPatches = [
|
||||
|
@ -40,5 +41,5 @@
|
|||
};
|
||||
};
|
||||
# TODO remove once this is upstreamed
|
||||
boot.initrd.systemd.services."zfs-import-rpool".after = ["cryptsetup.target"];
|
||||
boot.initrd.systemd.services."zfs-import-rpool".after = [ "cryptsetup.target" ];
|
||||
}
|
||||
|
|
|
@ -1,16 +1,12 @@
|
|||
{
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [3000];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
imports = [../actual.nix];
|
||||
imports = [ ../actual.nix ];
|
||||
services.actual = {
|
||||
enable = true;
|
||||
settings.port = 3000;
|
||||
};
|
||||
environment.persistence."/persist".directories = [
|
||||
{
|
||||
directory = "/var/lib/private/actual";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist".directories = [ { directory = "/var/lib/private/actual"; } ];
|
||||
}
|
||||
|
|
|
@ -1,11 +1,8 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [config.services.adguardhome.port];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ config.services.adguardhome.port ];
|
||||
};
|
||||
services.adguardhome = {
|
||||
enable = true;
|
||||
|
@ -16,8 +13,12 @@
|
|||
settings = {
|
||||
dns = {
|
||||
bind_hosts = [
|
||||
(lib.net.cidr.host config.secrets.secrets.global.net.ips.${config.node.name} config.secrets.secrets.global.net.privateSubnetv4)
|
||||
(lib.net.cidr.host config.secrets.secrets.global.net.ips.${config.node.name} config.secrets.secrets.global.net.privateSubnetv6)
|
||||
(lib.net.cidr.host config.secrets.secrets.global.net.ips.${config.node.name}
|
||||
config.secrets.secrets.global.net.privateSubnetv4
|
||||
)
|
||||
(lib.net.cidr.host config.secrets.secrets.global.net.ips.${config.node.name}
|
||||
config.secrets.secrets.global.net.privateSubnetv6
|
||||
)
|
||||
];
|
||||
anonymize_client_ip = false;
|
||||
upstream_dns = [
|
||||
|
@ -61,8 +62,8 @@
|
|||
};
|
||||
};
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [53];
|
||||
allowedUDPPorts = [53];
|
||||
allowedTCPPorts = [ 53 ];
|
||||
allowedUDPPorts = [ 53 ];
|
||||
};
|
||||
environment.persistence."/persist".directories = [
|
||||
{
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
age.secrets.cloudflare_token_dns = {
|
||||
rekeyFile = config.node.secretsDir + "/cloudflare_api_token.age";
|
||||
mode = "440";
|
||||
|
@ -13,6 +14,6 @@
|
|||
usev4 = "webv4, webv4='https://cloudflare.com/cdn-cgi/trace', webv4-skip='ip='";
|
||||
usev6 = "";
|
||||
passwordFile = config.age.secrets.cloudflare_token_dns.path;
|
||||
domains = [config.secrets.secrets.global.domains.web];
|
||||
domains = [ config.secrets.secrets.global.domains.web ];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,12 +1,9 @@
|
|||
{ config, nodes, ... }:
|
||||
{
|
||||
config,
|
||||
nodes,
|
||||
...
|
||||
}: {
|
||||
i18n.supportedLocales = ["all"];
|
||||
i18n.supportedLocales = [ "all" ];
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [80];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 80 ];
|
||||
};
|
||||
|
||||
age.secrets.appKey = {
|
||||
|
|
|
@ -4,9 +4,11 @@
|
|||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
forgejoDomain = "forge.${config.secrets.secrets.global.domains.web}";
|
||||
in {
|
||||
in
|
||||
{
|
||||
age.secrets.resticpasswd = {
|
||||
generator.script = "alnum";
|
||||
};
|
||||
|
@ -29,7 +31,7 @@ in {
|
|||
inherit (config.secrets.secrets.global.hetzner.users.forgejo) subUid path;
|
||||
sshAgeSecret = "forgejoHetznerSsh";
|
||||
};
|
||||
paths = [config.services.forgejo.stateDir];
|
||||
paths = [ config.services.forgejo.stateDir ];
|
||||
pruneOpts = [
|
||||
"--keep-daily 10"
|
||||
"--keep-weekly 7"
|
||||
|
@ -42,7 +44,7 @@ in {
|
|||
# Recommended by forgejo: https://forgejo.org/docs/latest/admin/recommendations/#git-over-ssh
|
||||
services.openssh.settings.AcceptEnv = "GIT_PROTOCOL";
|
||||
|
||||
users.groups.git = {};
|
||||
users.groups.git = { };
|
||||
users.users.git = {
|
||||
isSystemUser = true;
|
||||
useDefaultShell = true;
|
||||
|
@ -52,9 +54,11 @@ in {
|
|||
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [config.services.forgejo.settings.server.HTTP_PORT];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [
|
||||
config.services.forgejo.settings.server.HTTP_PORT
|
||||
];
|
||||
};
|
||||
networking.firewall.allowedTCPPorts = [config.services.forgejo.settings.server.SSH_PORT];
|
||||
networking.firewall.allowedTCPPorts = [ config.services.forgejo.settings.server.SSH_PORT ];
|
||||
|
||||
environment.persistence."/panzer".directories = [
|
||||
{
|
||||
|
@ -145,30 +149,31 @@ in {
|
|||
# see https://github.com/go-gitea/gitea/issues/21376.
|
||||
systemd.services.forgejo = {
|
||||
serviceConfig.RestartSec = "60"; # Retry every minute
|
||||
preStart = let
|
||||
exe = lib.getExe config.services.forgejo.package;
|
||||
providerName = "kanidm";
|
||||
clientId = "forgejo";
|
||||
args = lib.escapeShellArgs [
|
||||
"--name"
|
||||
providerName
|
||||
"--provider"
|
||||
"openidConnect"
|
||||
"--key"
|
||||
clientId
|
||||
"--auto-discover-url"
|
||||
"https://auth.${config.secrets.secrets.global.domains.web}/oauth2/openid/${clientId}/.well-known/openid-configuration"
|
||||
"--scopes"
|
||||
"email"
|
||||
"--scopes"
|
||||
"profile"
|
||||
"--group-claim-name"
|
||||
"groups"
|
||||
"--admin-group"
|
||||
"admin"
|
||||
"--skip-local-2fa"
|
||||
];
|
||||
in
|
||||
preStart =
|
||||
let
|
||||
exe = lib.getExe config.services.forgejo.package;
|
||||
providerName = "kanidm";
|
||||
clientId = "forgejo";
|
||||
args = lib.escapeShellArgs [
|
||||
"--name"
|
||||
providerName
|
||||
"--provider"
|
||||
"openidConnect"
|
||||
"--key"
|
||||
clientId
|
||||
"--auto-discover-url"
|
||||
"https://auth.${config.secrets.secrets.global.domains.web}/oauth2/openid/${clientId}/.well-known/openid-configuration"
|
||||
"--scopes"
|
||||
"email"
|
||||
"--scopes"
|
||||
"profile"
|
||||
"--group-claim-name"
|
||||
"groups"
|
||||
"--admin-group"
|
||||
"admin"
|
||||
"--skip-local-2fa"
|
||||
];
|
||||
in
|
||||
lib.mkAfter ''
|
||||
provider_id=$(${exe} admin auth list | ${pkgs.gnugrep}/bin/grep -w '${providerName}' | cut -f1)
|
||||
SECRET="$(< ${config.age.secrets.openid-secret.path})"
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
{
|
||||
imports = [../../modules/homebox.nix];
|
||||
imports = [ ../../modules/homebox.nix ];
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [3000];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
services.homebox = {
|
||||
enable = true;
|
||||
|
|
|
@ -4,7 +4,8 @@
|
|||
nodes,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
version = "v1.106.4";
|
||||
immichDomain = "immich.${config.secrets.secrets.global.domains.web}";
|
||||
|
||||
|
@ -136,21 +137,14 @@
|
|||
serviceConfig = {
|
||||
Restart = "always";
|
||||
};
|
||||
after = [
|
||||
"podman-network-immich-default.service"
|
||||
];
|
||||
requires = [
|
||||
"podman-network-immich-default.service"
|
||||
];
|
||||
partOf = [
|
||||
"podman-compose-immich-root.target"
|
||||
];
|
||||
wantedBy = [
|
||||
"podman-compose-immich-root.target"
|
||||
];
|
||||
after = [ "podman-network-immich-default.service" ];
|
||||
requires = [ "podman-network-immich-default.service" ];
|
||||
partOf = [ "podman-compose-immich-root.target" ];
|
||||
wantedBy = [ "podman-compose-immich-root.target" ];
|
||||
};
|
||||
processedConfigFile = "/run/agenix/immich.config.json";
|
||||
in {
|
||||
in
|
||||
{
|
||||
age.secrets.resticpasswd = {
|
||||
generator.script = "alnum";
|
||||
};
|
||||
|
@ -206,7 +200,7 @@ in {
|
|||
|
||||
system.activationScripts.agenixRooterDerivedSecrets = {
|
||||
# Run after agenix has generated secrets
|
||||
deps = ["agenix"];
|
||||
deps = [ "agenix" ];
|
||||
text = ''
|
||||
immichClientSecret=$(< ${config.age.secrets.immich-oauth2-client-secret.path})
|
||||
${pkgs.jq}/bin/jq --arg immichClientSecret "$immichClientSecret" '.oauth.clientSecret = $immichClientSecret' ${configFile} > ${processedConfigFile}
|
||||
|
@ -221,11 +215,11 @@ in {
|
|||
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [3000];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
|
||||
networking.nftables.chains.forward.into-immich-container = {
|
||||
after = ["conntrack"];
|
||||
after = [ "conntrack" ];
|
||||
rules = [
|
||||
"iifname elisabeth ip saddr ${nodes.elisabeth.config.wireguard.elisabeth.ipv4} tcp dport 3001 accept"
|
||||
"iifname podman1 oifname lan accept"
|
||||
|
@ -313,9 +307,7 @@ in {
|
|||
"${upload_folder}:/usr/src/app/upload:rw"
|
||||
"${environment.DB_PASSWORD_FILE}:${environment.DB_PASSWORD_FILE}:ro"
|
||||
];
|
||||
ports = [
|
||||
"3000:3001/tcp"
|
||||
];
|
||||
ports = [ "3000:3001/tcp" ];
|
||||
dependsOn = [
|
||||
"immich_postgres"
|
||||
"immich_redis"
|
||||
|
@ -327,18 +319,16 @@ in {
|
|||
"--ip=${ipImmichServer}"
|
||||
];
|
||||
};
|
||||
systemd.services."podman-immich_server" =
|
||||
serviceConfig
|
||||
// {
|
||||
unitConfig.UpheldBy = [
|
||||
"podman-immich_postgres.service"
|
||||
"podman-immich_redis.service"
|
||||
];
|
||||
};
|
||||
systemd.services."podman-immich_server" = serviceConfig // {
|
||||
unitConfig.UpheldBy = [
|
||||
"podman-immich_postgres.service"
|
||||
"podman-immich_redis.service"
|
||||
];
|
||||
};
|
||||
|
||||
# Networks
|
||||
systemd.services."podman-network-immich-default" = {
|
||||
path = [pkgs.podman];
|
||||
path = [ pkgs.podman ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
|
@ -347,8 +337,8 @@ in {
|
|||
script = ''
|
||||
podman network inspect immich-default || podman network create immich-default --opt isolate=true --disable-dns --subnet=10.89.0.0/24
|
||||
'';
|
||||
partOf = ["podman-compose-immich-root.target"];
|
||||
wantedBy = ["podman-compose-immich-root.target"];
|
||||
partOf = [ "podman-compose-immich-root.target" ];
|
||||
wantedBy = [ "podman-compose-immich-root.target" ];
|
||||
};
|
||||
|
||||
# Root service
|
||||
|
@ -358,6 +348,6 @@ in {
|
|||
unitConfig = {
|
||||
Description = "Root target generated by compose2nix.";
|
||||
};
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
{config, ...}: let
|
||||
{ config, ... }:
|
||||
let
|
||||
kanidmdomain = "auth.${config.secrets.secrets.global.domains.web}";
|
||||
in {
|
||||
imports = [../../modules/kanidm.nix];
|
||||
in
|
||||
{
|
||||
imports = [ ../../modules/kanidm.nix ];
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [3000];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
disabledModules = ["services/security/kanidm.nix"];
|
||||
disabledModules = [ "services/security/kanidm.nix" ];
|
||||
environment.persistence."/persist".directories = [
|
||||
{
|
||||
directory = "/var/lib/kanidm";
|
||||
|
@ -74,42 +76,50 @@ in {
|
|||
inherit (config.secrets.secrets.local.kanidm) persons;
|
||||
|
||||
groups."paperless.access" = {
|
||||
members = ["paperless.admins"];
|
||||
members = [ "paperless.admins" ];
|
||||
};
|
||||
# currently not usable
|
||||
groups."paperless.admins" = {
|
||||
members = ["administrator"];
|
||||
members = [ "administrator" ];
|
||||
};
|
||||
systems.oauth2.paperless = {
|
||||
displayName = "paperless";
|
||||
originUrl = "https://ppl.${config.secrets.secrets.global.domains.web}/";
|
||||
basicSecretFile = config.age.secrets.oauth2-paperless.path;
|
||||
scopeMaps."paperless.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."paperless.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
preferShortUsername = true;
|
||||
};
|
||||
|
||||
groups."nextcloud.access" = {
|
||||
members = ["nextcloud.admins"];
|
||||
members = [ "nextcloud.admins" ];
|
||||
};
|
||||
# currently not usable
|
||||
groups."nextcloud.admins" = {
|
||||
members = ["administrator"];
|
||||
members = [ "administrator" ];
|
||||
};
|
||||
systems.oauth2.nextcloud = {
|
||||
displayName = "nextcloud";
|
||||
originUrl = "https://nc.${config.secrets.secrets.global.domains.web}/";
|
||||
basicSecretFile = config.age.secrets.oauth2-nextcloud.path;
|
||||
allowInsecureClientDisablePkce = true;
|
||||
scopeMaps."nextcloud.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."nextcloud.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
preferShortUsername = true;
|
||||
};
|
||||
|
||||
groups."immich.access" = {
|
||||
members = ["immich.admins"];
|
||||
members = [ "immich.admins" ];
|
||||
};
|
||||
# currently not usable
|
||||
groups."immich.admins" = {
|
||||
members = ["administrator"];
|
||||
members = [ "administrator" ];
|
||||
};
|
||||
systems.oauth2.immich = {
|
||||
displayName = "Immich";
|
||||
|
@ -117,57 +127,84 @@ in {
|
|||
basicSecretFile = config.age.secrets.oauth2-immich.path;
|
||||
allowInsecureClientDisablePkce = true;
|
||||
enableLegacyCrypto = true;
|
||||
scopeMaps."immich.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."immich.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
preferShortUsername = true;
|
||||
};
|
||||
|
||||
groups."rss.access" = {};
|
||||
groups."firefly.access" = {};
|
||||
groups."ollama.access" = {};
|
||||
groups."adguardhome.access" = {};
|
||||
groups."octoprint.access" = {};
|
||||
groups."rss.access" = { };
|
||||
groups."firefly.access" = { };
|
||||
groups."ollama.access" = { };
|
||||
groups."adguardhome.access" = { };
|
||||
groups."octoprint.access" = { };
|
||||
|
||||
systems.oauth2.oauth2-proxy = {
|
||||
displayName = "Oauth2-Proxy";
|
||||
originUrl = "https://oauth2.${config.secrets.secrets.global.domains.web}/";
|
||||
basicSecretFile = config.age.secrets.oauth2-proxy.path;
|
||||
scopeMaps."adguardhome.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."rss.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."firefly.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."ollama.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."octoprint.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."adguardhome.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
scopeMaps."rss.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
scopeMaps."firefly.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
scopeMaps."ollama.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
scopeMaps."octoprint.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
preferShortUsername = true;
|
||||
claimMaps.groups = {
|
||||
joinType = "array";
|
||||
valuesByGroup."adguardhome.access" = ["adguardhome_access"];
|
||||
valuesByGroup."rss.access" = ["ttrss_access"];
|
||||
valuesByGroup."firefly.access" = ["firefly_access"];
|
||||
valuesByGroup."ollama.access" = ["ollama_access"];
|
||||
valuesByGroup."octoprint.access" = ["octoprint_access"];
|
||||
valuesByGroup."adguardhome.access" = [ "adguardhome_access" ];
|
||||
valuesByGroup."rss.access" = [ "ttrss_access" ];
|
||||
valuesByGroup."firefly.access" = [ "firefly_access" ];
|
||||
valuesByGroup."ollama.access" = [ "ollama_access" ];
|
||||
valuesByGroup."octoprint.access" = [ "octoprint_access" ];
|
||||
};
|
||||
};
|
||||
|
||||
groups."forgejo.access" = {
|
||||
members = ["forgejo.admins"];
|
||||
members = [ "forgejo.admins" ];
|
||||
};
|
||||
groups."forgejo.admins" = {
|
||||
members = ["administrator"];
|
||||
members = [ "administrator" ];
|
||||
};
|
||||
systems.oauth2.forgejo = {
|
||||
displayName = "Forgejo";
|
||||
originUrl = "https://forge.${config.secrets.secrets.global.domains.web}/";
|
||||
basicSecretFile = config.age.secrets.oauth2-forgejo.path;
|
||||
scopeMaps."forgejo.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."forgejo.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
allowInsecureClientDisablePkce = true;
|
||||
preferShortUsername = true;
|
||||
claimMaps.groups = {
|
||||
joinType = "array";
|
||||
valuesByGroup."forgejo.admins" = ["admin"];
|
||||
valuesByGroup."forgejo.admins" = [ "admin" ];
|
||||
};
|
||||
};
|
||||
|
||||
groups."netbird.access" = {
|
||||
};
|
||||
groups."netbird.access" = { };
|
||||
systems.oauth2.netbird = {
|
||||
public = true;
|
||||
displayName = "Netbird";
|
||||
|
@ -175,7 +212,11 @@ in {
|
|||
preferShortUsername = true;
|
||||
enableLocalhostRedirects = true;
|
||||
enableLegacyCrypto = true;
|
||||
scopeMaps."netbird.access" = ["openid" "email" "profile"];
|
||||
scopeMaps."netbird.access" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -5,12 +5,17 @@
|
|||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
priv_domain = config.secrets.secrets.global.domains.mail_private;
|
||||
domain = config.secrets.secrets.global.domains.mail_public;
|
||||
mailDomains = [priv_domain domain];
|
||||
mailDomains = [
|
||||
priv_domain
|
||||
domain
|
||||
];
|
||||
maddyBackupDir = "/var/cache/backups/maddy";
|
||||
in {
|
||||
in
|
||||
{
|
||||
systemd.tmpfiles.settings = {
|
||||
"10-maddy".${maddyBackupDir}.d = {
|
||||
inherit (config.services.maddy) user group;
|
||||
|
@ -40,7 +45,10 @@ in {
|
|||
inherit (config.secrets.secrets.global.hetzner.users.maddy) subUid path;
|
||||
sshAgeSecret = "maddyHetznerSsh";
|
||||
};
|
||||
paths = ["/var/lib/maddy/messages" maddyBackupDir];
|
||||
paths = [
|
||||
"/var/lib/maddy/messages"
|
||||
maddyBackupDir
|
||||
];
|
||||
pruneOpts = [
|
||||
"--keep-daily 10"
|
||||
"--keep-weekly 7"
|
||||
|
@ -49,22 +57,21 @@ in {
|
|||
];
|
||||
};
|
||||
};
|
||||
systemd.services.maddy-backup = let
|
||||
cfg = config.systemd.services.maddy;
|
||||
in {
|
||||
description = "Maddy db backup";
|
||||
serviceConfig =
|
||||
lib.recursiveUpdate
|
||||
cfg.serviceConfig
|
||||
{
|
||||
systemd.services.maddy-backup =
|
||||
let
|
||||
cfg = config.systemd.services.maddy;
|
||||
in
|
||||
{
|
||||
description = "Maddy db backup";
|
||||
serviceConfig = lib.recursiveUpdate cfg.serviceConfig {
|
||||
ExecStart = "${pkgs.sqlite}/bin/sqlite3 /var/lib/maddy/imapsql.db \".backup '${maddyBackupDir}/imapsql.sqlite3'\"";
|
||||
Restart = "no";
|
||||
Type = "oneshot";
|
||||
};
|
||||
inherit (cfg) environment;
|
||||
requiredBy = ["restic-backups-main.service"];
|
||||
before = ["restic-backups-main.service"];
|
||||
};
|
||||
inherit (cfg) environment;
|
||||
requiredBy = [ "restic-backups-main.service" ];
|
||||
before = [ "restic-backups-main.service" ];
|
||||
};
|
||||
|
||||
age.secrets.patrickPasswd = {
|
||||
generator.script = "alnum";
|
||||
|
@ -73,7 +80,10 @@ in {
|
|||
};
|
||||
# Opening ports for additional TLS listeners. This is not yet
|
||||
# implemented in the module.
|
||||
networking.firewall.allowedTCPPorts = [993 465];
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
993
|
||||
465
|
||||
];
|
||||
services.maddy = {
|
||||
enable = true;
|
||||
hostname = "mx1." + domain;
|
||||
|
@ -91,9 +101,7 @@ in {
|
|||
ensureCredentials = {
|
||||
"patrick@${domain}".passwordFile = config.age.secrets.patrickPasswd.path;
|
||||
};
|
||||
ensureAccounts = [
|
||||
"patrick@${domain}"
|
||||
];
|
||||
ensureAccounts = [ "patrick@${domain}" ];
|
||||
openFirewall = true;
|
||||
config = ''
|
||||
## Maddy Mail Server - default configuration file (2022-06-18)
|
||||
|
@ -288,33 +296,31 @@ in {
|
|||
useACMEWildcardHost = true;
|
||||
locations."=/mail/config-v1.1.xml".alias =
|
||||
pkgs.writeText "autoconfig.${domain}.xml"
|
||||
/*
|
||||
xml
|
||||
*/
|
||||
''
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<clientConfig version="1.1">
|
||||
<emailProvider id="${domain}">
|
||||
<domain>${domain}</domain>
|
||||
<displayName>%EMAILADDRESS%</displayName>
|
||||
<displayShortName>%EMAILLOCALPART%</displayShortName>
|
||||
<incomingServer type="imap">
|
||||
<hostname>mail.${domain}</hostname>
|
||||
<port>993</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>mail.${domain}</hostname>
|
||||
<port>465</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</outgoingServer>
|
||||
</emailProvider>
|
||||
</clientConfig>
|
||||
'';
|
||||
# xml
|
||||
''
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<clientConfig version="1.1">
|
||||
<emailProvider id="${domain}">
|
||||
<domain>${domain}</domain>
|
||||
<displayName>%EMAILADDRESS%</displayName>
|
||||
<displayShortName>%EMAILLOCALPART%</displayShortName>
|
||||
<incomingServer type="imap">
|
||||
<hostname>mail.${domain}</hostname>
|
||||
<port>993</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>mail.${domain}</hostname>
|
||||
<port>465</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</outgoingServer>
|
||||
</emailProvider>
|
||||
</clientConfig>
|
||||
'';
|
||||
}))
|
||||
];
|
||||
environment.persistence."/persist".directories = [
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
{config, ...}: {
|
||||
networking.firewall.allowedUDPPorts = [config.services.teamspeak3.defaultVoicePort];
|
||||
{ config, ... }:
|
||||
{
|
||||
networking.firewall.allowedUDPPorts = [ config.services.teamspeak3.defaultVoicePort ];
|
||||
services.teamspeak3 = {
|
||||
enable = true;
|
||||
};
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [80 3000 3001];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [
|
||||
80
|
||||
3000
|
||||
3001
|
||||
];
|
||||
};
|
||||
|
||||
age.secrets.coturnPassword = {
|
||||
|
@ -19,14 +20,20 @@
|
|||
};
|
||||
|
||||
age.secrets.dataEnc = {
|
||||
generator.script = {pkgs, ...}: ''
|
||||
${lib.getExe pkgs.openssl} rand -base64 32
|
||||
'';
|
||||
generator.script =
|
||||
{ pkgs, ... }:
|
||||
''
|
||||
${lib.getExe pkgs.openssl} rand -base64 32
|
||||
'';
|
||||
group = "netbird";
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [80 3000 3001];
|
||||
networking.firewall.allowedUDPPorts = [3478];
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
3000
|
||||
3001
|
||||
];
|
||||
networking.firewall.allowedUDPPorts = [ 3478 ];
|
||||
services.netbird = {
|
||||
server = {
|
||||
enable = true;
|
||||
|
|
|
@ -4,9 +4,11 @@
|
|||
config,
|
||||
nodes,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
hostName = "nc.${config.secrets.secrets.global.domains.web}";
|
||||
in {
|
||||
in
|
||||
{
|
||||
age.secrets.maddyPasswd = {
|
||||
generator.script = "alnum";
|
||||
mode = "440";
|
||||
|
@ -20,7 +22,8 @@ in {
|
|||
mode = "640";
|
||||
};
|
||||
services.maddy.ensureCredentials = {
|
||||
"nextcloud@${config.secrets.secrets.global.domains.mail_public}".passwordFile = nodes.maddy.config.age.secrets.nextcloudPasswd.path;
|
||||
"nextcloud@${config.secrets.secrets.global.domains.mail_public}".passwordFile =
|
||||
nodes.maddy.config.age.secrets.nextcloudPasswd.path;
|
||||
};
|
||||
};
|
||||
environment.persistence."/persist".directories = [
|
||||
|
@ -54,7 +57,15 @@ in {
|
|||
config.adminpassFile = config.age.secrets.ncpasswd.path; # Kinda ok just remember to instanly change after first setup
|
||||
config.adminuser = "admin";
|
||||
extraApps = with config.services.nextcloud.package.packages.apps; {
|
||||
inherit contacts calendar tasks notes maps phonetrack user_oidc;
|
||||
inherit
|
||||
contacts
|
||||
calendar
|
||||
tasks
|
||||
notes
|
||||
maps
|
||||
phonetrack
|
||||
user_oidc
|
||||
;
|
||||
};
|
||||
maxUploadSize = "4G";
|
||||
extraAppsEnable = true;
|
||||
|
@ -62,7 +73,7 @@ in {
|
|||
phpOptions."opcache.interned_strings_buffer" = "32";
|
||||
settings = {
|
||||
default_phone_region = "DE";
|
||||
trusted_proxies = [nodes.elisabeth.config.wireguard.elisabeth.ipv4];
|
||||
trusted_proxies = [ nodes.elisabeth.config.wireguard.elisabeth.ipv4 ];
|
||||
overwriteprotocol = "https";
|
||||
maintenance_window_start = 2;
|
||||
enabledPreviewProviders = [
|
||||
|
@ -93,20 +104,22 @@ in {
|
|||
dbtype = "pgsql";
|
||||
};
|
||||
};
|
||||
systemd.tmpfiles.rules = let
|
||||
mailer-passwd-conf = pkgs.writeText "nextcloud-config.php" ''
|
||||
<?php
|
||||
$CONFIG = [
|
||||
'mail_smtppassword' => trim(file_get_contents('${config.age.secrets.maddyPasswd.path}')),
|
||||
];
|
||||
'';
|
||||
in [
|
||||
"L+ ${config.services.nextcloud.datadir}/config/mailer.config.php - - - - ${mailer-passwd-conf}"
|
||||
];
|
||||
systemd.tmpfiles.rules =
|
||||
let
|
||||
mailer-passwd-conf = pkgs.writeText "nextcloud-config.php" ''
|
||||
<?php
|
||||
$CONFIG = [
|
||||
'mail_smtppassword' => trim(file_get_contents('${config.age.secrets.maddyPasswd.path}')),
|
||||
];
|
||||
'';
|
||||
in
|
||||
[
|
||||
"L+ ${config.services.nextcloud.datadir}/config/mailer.config.php - - - - ${mailer-passwd-conf}"
|
||||
];
|
||||
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [80];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 80 ];
|
||||
};
|
||||
networking = {
|
||||
# Use systemd-resolved inside the container
|
||||
|
|
|
@ -1,11 +1,8 @@
|
|||
{ config, nodes, ... }:
|
||||
{
|
||||
config,
|
||||
nodes,
|
||||
...
|
||||
}: {
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [3000];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
|
||||
age.secrets.oauth2-cookie-secret = {
|
||||
|
@ -46,7 +43,7 @@
|
|||
redeemURL = "https://auth.${config.secrets.secrets.global.domains.web}/oauth2/token";
|
||||
validateURL = "https://auth.${config.secrets.secrets.global.domains.web}/oauth2/openid/oauth2-proxy/userinfo";
|
||||
clientID = "oauth2-proxy";
|
||||
email.domains = ["*"];
|
||||
email.domains = [ "*" ];
|
||||
};
|
||||
|
||||
systemd.services.oauth2-proxy.serviceConfig = {
|
||||
|
@ -72,18 +69,18 @@
|
|||
# it includes the newline terminating the file which
|
||||
# makes kanidm reject the secret
|
||||
age.secrets.oauth2-client-secret-env = {
|
||||
generator.dependencies = [
|
||||
nodes.elisabeth-kanidm.config.age.secrets.oauth2-proxy
|
||||
];
|
||||
generator.script = {
|
||||
lib,
|
||||
decrypt,
|
||||
deps,
|
||||
...
|
||||
}: ''
|
||||
echo -n "OAUTH2_PROXY_CLIENT_SECRET="
|
||||
${decrypt} ${lib.escapeShellArg (lib.head deps).file}
|
||||
'';
|
||||
generator.dependencies = [ nodes.elisabeth-kanidm.config.age.secrets.oauth2-proxy ];
|
||||
generator.script =
|
||||
{
|
||||
lib,
|
||||
decrypt,
|
||||
deps,
|
||||
...
|
||||
}:
|
||||
''
|
||||
echo -n "OAUTH2_PROXY_CLIENT_SECRET="
|
||||
${decrypt} ${lib.escapeShellArg (lib.head deps).file}
|
||||
'';
|
||||
mode = "440";
|
||||
group = "oauth2-proxy";
|
||||
};
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [config.services.octoprint.port];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ config.services.octoprint.port ];
|
||||
};
|
||||
services.octoprint = {
|
||||
port = 3000;
|
||||
enable = true;
|
||||
plugins = ps: with ps; [ender3v2tempfix];
|
||||
plugins = ps: with ps; [ ender3v2tempfix ];
|
||||
extraConfig = {
|
||||
accessControl = {
|
||||
addRemoteUser = true;
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [config.services.open-webui.port];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ config.services.open-webui.port ];
|
||||
};
|
||||
services.ollama = {
|
||||
host = "localhost";
|
||||
|
|
|
@ -4,10 +4,12 @@
|
|||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
paperlessdomain = "ppl.${config.secrets.secrets.global.domains.web}";
|
||||
paperlessBackupDir = "/var/cache/backups/paperless";
|
||||
in {
|
||||
in
|
||||
{
|
||||
systemd.tmpfiles.settings = {
|
||||
"10-paperless".${paperlessBackupDir}.d = {
|
||||
inherit (config.services.paperless) user;
|
||||
|
@ -36,7 +38,7 @@ in {
|
|||
inherit (config.secrets.secrets.global.hetzner.users.paperless) subUid path;
|
||||
sshAgeSecret = "paperlessHetznerSsh";
|
||||
};
|
||||
paths = [paperlessBackupDir];
|
||||
paths = [ paperlessBackupDir ];
|
||||
pruneOpts = [
|
||||
"--keep-daily 10"
|
||||
"--keep-weekly 7"
|
||||
|
@ -45,27 +47,26 @@ in {
|
|||
];
|
||||
};
|
||||
};
|
||||
systemd.services.paperless-backup = let
|
||||
cfg = config.systemd.services.paperless-consumer;
|
||||
in {
|
||||
description = "Paperless document backup";
|
||||
serviceConfig =
|
||||
lib.recursiveUpdate
|
||||
cfg.serviceConfig
|
||||
{
|
||||
systemd.services.paperless-backup =
|
||||
let
|
||||
cfg = config.systemd.services.paperless-consumer;
|
||||
in
|
||||
{
|
||||
description = "Paperless document backup";
|
||||
serviceConfig = lib.recursiveUpdate cfg.serviceConfig {
|
||||
ExecStart = "${config.services.paperless.package}/bin/paperless-ngx document_exporter -na -nt -f -d ${paperlessBackupDir}";
|
||||
ReadWritePaths = cfg.serviceConfig.ReadWritePaths ++ [paperlessBackupDir];
|
||||
ReadWritePaths = cfg.serviceConfig.ReadWritePaths ++ [ paperlessBackupDir ];
|
||||
Restart = "no";
|
||||
Type = "oneshot";
|
||||
};
|
||||
inherit (cfg) environment;
|
||||
requiredBy = ["restic-backups-main.service"];
|
||||
before = ["restic-backups-main.service"];
|
||||
};
|
||||
inherit (cfg) environment;
|
||||
requiredBy = [ "restic-backups-main.service" ];
|
||||
before = [ "restic-backups-main.service" ];
|
||||
};
|
||||
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [config.services.paperless.port];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ config.services.paperless.port ];
|
||||
};
|
||||
|
||||
age.secrets.paperless-admin-passwd = {
|
||||
|
|
|
@ -4,18 +4,20 @@
|
|||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
prestart = pkgs.writeShellScript "pr-tracker-pre" ''
|
||||
if [ ! -d ./nixpkgs ]; then
|
||||
${lib.getExe pkgs.git} clone https://github.com/NixOS/nixpkgs.git
|
||||
fi
|
||||
'';
|
||||
in {
|
||||
in
|
||||
{
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [3000];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
networking.firewall.allowedTCPPorts = [3000];
|
||||
networking.firewall.allowedTCPPorts = [ 3000 ];
|
||||
environment.persistence."/persist".directories = [
|
||||
{
|
||||
directory = "/var/lib/pr-tracker";
|
||||
|
@ -43,15 +45,16 @@ in {
|
|||
mode = "640";
|
||||
};
|
||||
services.maddy.ensureCredentials = {
|
||||
"pr-tracker@${config.secrets.secrets.global.domains.mail_public}".passwordFile = nodes.maddy.config.age.secrets.pr-trackerPasswd.path;
|
||||
"pr-tracker@${config.secrets.secrets.global.domains.mail_public}".passwordFile =
|
||||
nodes.maddy.config.age.secrets.pr-trackerPasswd.path;
|
||||
};
|
||||
};
|
||||
systemd.sockets.pr-tracker = {
|
||||
listenStreams = ["0.0.0.0:3000"];
|
||||
wantedBy = ["sockets.target"];
|
||||
listenStreams = [ "0.0.0.0:3000" ];
|
||||
wantedBy = [ "sockets.target" ];
|
||||
};
|
||||
systemd.services.pr-tracker = {
|
||||
path = [pkgs.git];
|
||||
path = [ pkgs.git ];
|
||||
serviceConfig = {
|
||||
User = "pr-tracker";
|
||||
Group = "pr-tracker";
|
||||
|
@ -104,13 +107,13 @@ in {
|
|||
};
|
||||
};
|
||||
systemd.timers.pr-tracker-update = {
|
||||
wantedBy = ["timers.target"];
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnBootSec = "30m";
|
||||
OnUnitActiveSec = "30m";
|
||||
};
|
||||
};
|
||||
users.groups.pr-tracker = {};
|
||||
users.groups.pr-tracker = { };
|
||||
users.users.pr-tracker = {
|
||||
isSystemUser = true;
|
||||
group = "pr-tracker";
|
||||
|
|
|
@ -4,14 +4,20 @@
|
|||
config,
|
||||
pkgs, # not unused; needed so that the attrs argument used later contains pkgs
|
||||
...
|
||||
} @ attrs: let
|
||||
}@attrs:
|
||||
let
|
||||
hostName = "radicale.${config.secrets.secrets.global.domains.mail}";
|
||||
in {
|
||||
imports = [./containers.nix ./ddclient.nix ./acme.nix];
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./containers.nix
|
||||
./ddclient.nix
|
||||
./acme.nix
|
||||
];
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
upstreams.radicale = {
|
||||
servers."192.168.178.34:8000" = {};
|
||||
servers."192.168.178.34:8000" = { };
|
||||
|
||||
extraConfig = ''
|
||||
zone radicale 64k ;
|
||||
|
@ -32,10 +38,10 @@ in {
|
|||
config = _: {
|
||||
systemd.network.networks = {
|
||||
"lan01" = {
|
||||
address = ["192.168.178.34/24"];
|
||||
gateway = ["192.168.178.1"];
|
||||
address = [ "192.168.178.34/24" ];
|
||||
gateway = [ "192.168.178.1" ];
|
||||
matchConfig.Name = "lan01*";
|
||||
dns = ["192.168.178.2"];
|
||||
dns = [ "192.168.178.2" ];
|
||||
networkConfig = {
|
||||
IPv6PrivacyExtensions = "yes";
|
||||
MulticastDNS = true;
|
||||
|
@ -54,7 +60,10 @@ in {
|
|||
enable = true;
|
||||
setting = {
|
||||
server = {
|
||||
hosts = ["0.0.0.0:8000" "[::]:8000"];
|
||||
hosts = [
|
||||
"0.0.0.0:8000"
|
||||
"[::]:8000"
|
||||
];
|
||||
auth = {
|
||||
type = "htpasswd";
|
||||
htpasswd_filename = "/etc/radicale/users";
|
||||
|
@ -89,7 +98,7 @@ in {
|
|||
networking = {
|
||||
firewall = {
|
||||
enable = true;
|
||||
allowedTCPPorts = [8000];
|
||||
allowedTCPPorts = [ 8000 ];
|
||||
};
|
||||
# Use systemd-resolved inside the container
|
||||
useHostResolvConf = lib.mkForce false;
|
||||
|
@ -106,4 +115,3 @@ in {
|
|||
#kanidm
|
||||
#remote backups
|
||||
#immich
|
||||
|
||||
|
|
|
@ -1,16 +1,13 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
services.samba-wsdd = {
|
||||
enable = true; # make shares visible for Windows 10 clients
|
||||
openFirewall = true;
|
||||
};
|
||||
|
||||
disabledModules = ["services/networking/netbird.nix"];
|
||||
disabledModules = [ "services/networking/netbird.nix" ];
|
||||
|
||||
imports = [../../modules/netbird-client.nix];
|
||||
imports = [ ../../modules/netbird-client.nix ];
|
||||
services.netbird.tunnels = {
|
||||
netbird-samba = {
|
||||
environment = {
|
||||
|
@ -43,7 +40,7 @@
|
|||
inherit (config.secrets.secrets.global.hetzner.users.smb) subUid path;
|
||||
sshAgeSecret = "resticHetznerSsh";
|
||||
};
|
||||
paths = ["/bunker"];
|
||||
paths = [ "/bunker" ];
|
||||
pruneOpts = [
|
||||
"--keep-daily 10"
|
||||
"--keep-weekly 7"
|
||||
|
@ -55,11 +52,17 @@
|
|||
wireguard.samba-patrick.server = {
|
||||
host = config.secrets.secrets.global.domains.web;
|
||||
port = 51830;
|
||||
reservedAddresses = ["10.43.0.0/20" "fd00:1765::/112"];
|
||||
reservedAddresses = [
|
||||
"10.43.0.0/20"
|
||||
"fd00:1765::/112"
|
||||
];
|
||||
openFirewall = true;
|
||||
};
|
||||
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = ["samba-patrick" "netbird-samba"];
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [
|
||||
"samba-patrick"
|
||||
"netbird-samba"
|
||||
];
|
||||
|
||||
services.samba = {
|
||||
enable = true;
|
||||
|
@ -106,53 +109,49 @@
|
|||
"disable spoolss = yes"
|
||||
"show add printer wizard = no"
|
||||
];
|
||||
shares = let
|
||||
mkShare = {
|
||||
name,
|
||||
user ? "smb",
|
||||
group ? "smb",
|
||||
hasBunker ? false,
|
||||
hasPaperless ? false,
|
||||
persistRoot ? "/panzer",
|
||||
}: cfg: let
|
||||
config =
|
||||
shares =
|
||||
let
|
||||
mkShare =
|
||||
{
|
||||
"#persistRoot" = persistRoot;
|
||||
"#user" = user;
|
||||
"#group" = group;
|
||||
"read only" = "no";
|
||||
"guest ok" = "no";
|
||||
"create mask" = "0740";
|
||||
"directory mask" = "0750";
|
||||
"force user" = user;
|
||||
"force group" = group;
|
||||
"valid users" = "${user} @${group}";
|
||||
"force create mode" = "0660";
|
||||
"force directory mode" = "0770";
|
||||
# Might be necessary for Windows users to be able to open things in smb
|
||||
"acl allow execute always" = "no";
|
||||
name,
|
||||
user ? "smb",
|
||||
group ? "smb",
|
||||
hasBunker ? false,
|
||||
hasPaperless ? false,
|
||||
persistRoot ? "/panzer",
|
||||
}:
|
||||
cfg:
|
||||
let
|
||||
config = {
|
||||
"#persistRoot" = persistRoot;
|
||||
"#user" = user;
|
||||
"#group" = group;
|
||||
"read only" = "no";
|
||||
"guest ok" = "no";
|
||||
"create mask" = "0740";
|
||||
"directory mask" = "0750";
|
||||
"force user" = user;
|
||||
"force group" = group;
|
||||
"valid users" = "${user} @${group}";
|
||||
"force create mode" = "0660";
|
||||
"force directory mode" = "0770";
|
||||
# Might be necessary for Windows users to be able to open things in smb
|
||||
"acl allow execute always" = "no";
|
||||
} // cfg;
|
||||
in
|
||||
{
|
||||
"${name}" = config // {
|
||||
"path" = "/media/smb/${name}";
|
||||
};
|
||||
}
|
||||
// cfg;
|
||||
in
|
||||
{
|
||||
"${name}" =
|
||||
config
|
||||
// {"path" = "/media/smb/${name}";};
|
||||
}
|
||||
// lib.optionalAttrs hasBunker
|
||||
{
|
||||
"${name}-important" =
|
||||
config
|
||||
// {
|
||||
// lib.optionalAttrs hasBunker {
|
||||
"${name}-important" = config // {
|
||||
"path" = "/media/smb/${name}-important";
|
||||
"#persistRoot" = "/bunker";
|
||||
};
|
||||
}
|
||||
// lib.optionalAttrs hasPaperless
|
||||
{
|
||||
"${name}-paperless" =
|
||||
config
|
||||
// {
|
||||
}
|
||||
// lib.optionalAttrs hasPaperless {
|
||||
"${name}-paperless" = config // {
|
||||
"path" = "/media/smb/${name}-paperless";
|
||||
"#paperless" = true;
|
||||
"force user" = "paperless";
|
||||
|
@ -160,46 +159,47 @@
|
|||
# Empty to prevent impermanence setting a persistence folder
|
||||
"#persistRoot" = "";
|
||||
};
|
||||
};
|
||||
in
|
||||
};
|
||||
in
|
||||
lib.mkMerge [
|
||||
(mkShare {
|
||||
name = "ggr-data";
|
||||
user = "ggr";
|
||||
group = "ggr";
|
||||
hasBunker = true;
|
||||
} {})
|
||||
} { })
|
||||
(mkShare {
|
||||
name = "patri";
|
||||
user = "patrick";
|
||||
group = "patrick";
|
||||
hasBunker = true;
|
||||
hasPaperless = true;
|
||||
} {})
|
||||
} { })
|
||||
(mkShare {
|
||||
name = "helen-data";
|
||||
user = "helen";
|
||||
group = "helen";
|
||||
hasBunker = true;
|
||||
} {})
|
||||
} { })
|
||||
(mkShare {
|
||||
name = "david";
|
||||
user = "david";
|
||||
group = "david";
|
||||
hasBunker = true;
|
||||
hasPaperless = true;
|
||||
} {})
|
||||
} { })
|
||||
(mkShare {
|
||||
name = "printer";
|
||||
user = "printer";
|
||||
group = "printer";
|
||||
} {})
|
||||
} { })
|
||||
(mkShare {
|
||||
name = "family-data";
|
||||
user = "family";
|
||||
group = "family";
|
||||
} {})
|
||||
(mkShare {
|
||||
} { })
|
||||
(mkShare
|
||||
{
|
||||
name = "media";
|
||||
user = "family";
|
||||
group = "family";
|
||||
|
@ -208,7 +208,8 @@
|
|||
{
|
||||
"read only" = "yes";
|
||||
"write list" = "@family";
|
||||
})
|
||||
}
|
||||
)
|
||||
];
|
||||
};
|
||||
# To get this file, start an smbd and add users using 'smbpasswd -a <user>'
|
||||
|
@ -216,129 +217,162 @@
|
|||
age.secrets.smbpassdb = {
|
||||
rekeyFile = config.node.secretsDir + "/smbpassdb.tdb.age";
|
||||
};
|
||||
users = let
|
||||
users = lib.unique (lib.mapAttrsToList (_: val: val."force user") config.services.samba.shares);
|
||||
groups = lib.unique (users ++ (lib.mapAttrsToList (_: val: val."force group") config.services.samba.shares));
|
||||
in {
|
||||
users = lib.mkMerge ((lib.flip map users (user: {
|
||||
${user} = {
|
||||
isNormalUser = true;
|
||||
home = "/var/empty";
|
||||
createHome = false;
|
||||
useDefaultShell = false;
|
||||
autoSubUidGidRange = false;
|
||||
group = "${user}";
|
||||
};
|
||||
}))
|
||||
++ [
|
||||
{paperless.isNormalUser = lib.mkForce false;}
|
||||
]);
|
||||
groups = lib.mkMerge ((lib.flip map groups (group: {
|
||||
${group} = {
|
||||
};
|
||||
}))
|
||||
++ [
|
||||
{
|
||||
family.members = ["patrick" "david" "helen" "ggr"];
|
||||
printer.members = ["patrick" "david" "helen" "ggr"];
|
||||
}
|
||||
]);
|
||||
};
|
||||
|
||||
fileSystems = lib.mkMerge (lib.flip lib.mapAttrsToList config.services.samba.shares (_: v:
|
||||
lib.optionalAttrs ((v ? "#paperless") && v."#paperless") {
|
||||
"${v.path}/consume" = {
|
||||
fsType = "none";
|
||||
options = ["bind"];
|
||||
device = "/paperless/consume/${v."#user"}";
|
||||
};
|
||||
"${v.path}/media/archive" = {
|
||||
fsType = "none ";
|
||||
options = ["bind" "ro"];
|
||||
device = "/paperless/media/documents/archive/${v."#user"}";
|
||||
};
|
||||
"${v.path}/media/originals" = {
|
||||
fsType = "none ";
|
||||
options = ["bind" "ro"];
|
||||
device = "/paperless/media/documents/originals/${v."#user"}";
|
||||
};
|
||||
}));
|
||||
|
||||
systemd.tmpfiles.settings = lib.mkMerge (lib.flip lib.mapAttrsToList config.services.samba.shares (_: v:
|
||||
lib.optionalAttrs ((v ? "#paperless") && v."#paperless") {
|
||||
"10-smb-paperless"."/paperless/consume/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/consume/${v."#user"}".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
|
||||
"10-smb-paperless"."/paperless/media/documents/archive/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/archive/${v."#user"}".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/archive/${v."#user"}/.keep".f = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0660";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/originals/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/originals/${v."#user"}".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/originals/${v."#user"}/.keep".f = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0660";
|
||||
};
|
||||
}));
|
||||
environment.persistence = lib.mkMerge (lib.flatten [
|
||||
(lib.flip lib.mapAttrsToList config.services.samba.shares (_: v:
|
||||
lib.optionalAttrs ((v ? "#persistRoot") && (v."#persistRoot" != "")) {
|
||||
${v."#persistRoot"}.directories = [
|
||||
users =
|
||||
let
|
||||
users = lib.unique (lib.mapAttrsToList (_: val: val."force user") config.services.samba.shares);
|
||||
groups = lib.unique (
|
||||
users ++ (lib.mapAttrsToList (_: val: val."force group") config.services.samba.shares)
|
||||
);
|
||||
in
|
||||
{
|
||||
users = lib.mkMerge (
|
||||
(lib.flip map users (user: {
|
||||
${user} = {
|
||||
isNormalUser = true;
|
||||
home = "/var/empty";
|
||||
createHome = false;
|
||||
useDefaultShell = false;
|
||||
autoSubUidGidRange = false;
|
||||
group = "${user}";
|
||||
};
|
||||
}))
|
||||
++ [ { paperless.isNormalUser = lib.mkForce false; } ]
|
||||
);
|
||||
groups = lib.mkMerge (
|
||||
(lib.flip map groups (group: {
|
||||
${group} = { };
|
||||
}))
|
||||
++ [
|
||||
{
|
||||
directory = "${v.path}";
|
||||
user = "${v."force user"}";
|
||||
group = "${v."force group"}";
|
||||
mode = "0770";
|
||||
family.members = [
|
||||
"patrick"
|
||||
"david"
|
||||
"helen"
|
||||
"ggr"
|
||||
];
|
||||
printer.members = [
|
||||
"patrick"
|
||||
"david"
|
||||
"helen"
|
||||
"ggr"
|
||||
];
|
||||
}
|
||||
];
|
||||
}))
|
||||
(lib.flip lib.mapAttrsToList config.services.netbird.tunnels (
|
||||
_: v: {
|
||||
"/state".directories = [
|
||||
{
|
||||
directory = "/var/lib/${v.stateDir}";
|
||||
mode = "0770";
|
||||
}
|
||||
];
|
||||
]
|
||||
);
|
||||
};
|
||||
|
||||
fileSystems = lib.mkMerge (
|
||||
lib.flip lib.mapAttrsToList config.services.samba.shares (
|
||||
_: v:
|
||||
lib.optionalAttrs ((v ? "#paperless") && v."#paperless") {
|
||||
"${v.path}/consume" = {
|
||||
fsType = "none";
|
||||
options = [ "bind" ];
|
||||
device = "/paperless/consume/${v."#user"}";
|
||||
};
|
||||
"${v.path}/media/archive" = {
|
||||
fsType = "none ";
|
||||
options = [
|
||||
"bind"
|
||||
"ro"
|
||||
];
|
||||
device = "/paperless/media/documents/archive/${v."#user"}";
|
||||
};
|
||||
"${v.path}/media/originals" = {
|
||||
fsType = "none ";
|
||||
options = [
|
||||
"bind"
|
||||
"ro"
|
||||
];
|
||||
device = "/paperless/media/documents/originals/${v."#user"}";
|
||||
};
|
||||
}
|
||||
))
|
||||
]);
|
||||
)
|
||||
);
|
||||
|
||||
systemd.tmpfiles.settings = lib.mkMerge (
|
||||
lib.flip lib.mapAttrsToList config.services.samba.shares (
|
||||
_: v:
|
||||
lib.optionalAttrs ((v ? "#paperless") && v."#paperless") {
|
||||
"10-smb-paperless"."/paperless/consume/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/consume/${v."#user"}".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
|
||||
"10-smb-paperless"."/paperless/media/documents/archive/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/archive/${v."#user"}".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/archive/${v."#user"}/.keep".f = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0660";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/originals/".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/originals/${v."#user"}".d = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0770";
|
||||
};
|
||||
"10-smb-paperless"."/paperless/media/documents/originals/${v."#user"}/.keep".f = {
|
||||
user = "paperless";
|
||||
group = "paperless";
|
||||
mode = "0660";
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
environment.persistence = lib.mkMerge (
|
||||
lib.flatten [
|
||||
(lib.flip lib.mapAttrsToList config.services.samba.shares (
|
||||
_: v:
|
||||
lib.optionalAttrs ((v ? "#persistRoot") && (v."#persistRoot" != "")) {
|
||||
${v."#persistRoot"}.directories = [
|
||||
{
|
||||
directory = "${v.path}";
|
||||
user = "${v."force user"}";
|
||||
group = "${v."force group"}";
|
||||
mode = "0770";
|
||||
}
|
||||
];
|
||||
}
|
||||
))
|
||||
(lib.flip lib.mapAttrsToList config.services.netbird.tunnels (
|
||||
_: v: {
|
||||
"/state".directories = [
|
||||
{
|
||||
directory = "/var/lib/${v.stateDir}";
|
||||
mode = "0770";
|
||||
}
|
||||
];
|
||||
}
|
||||
))
|
||||
]
|
||||
);
|
||||
}
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [80];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ 80 ];
|
||||
};
|
||||
services.freshrss = {
|
||||
enable = true;
|
||||
|
|
|
@ -3,9 +3,11 @@
|
|||
lib,
|
||||
nodes,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
vaultwardenDomain = "pw.${config.secrets.secrets.global.domains.web}";
|
||||
in {
|
||||
in
|
||||
{
|
||||
age.secrets.vaultwarden-env = {
|
||||
rekeyFile = config.node.secretsDir + "/vaultwarden-env.age";
|
||||
mode = "440";
|
||||
|
@ -43,7 +45,7 @@ in {
|
|||
inherit (config.secrets.secrets.global.hetzner.users.vaultwarden) subUid path;
|
||||
sshAgeSecret = "vaultwardenHetznerSsh";
|
||||
};
|
||||
paths = [config.services.vaultwarden.backupDir];
|
||||
paths = [ config.services.vaultwarden.backupDir ];
|
||||
pruneOpts = [
|
||||
"--keep-daily 10"
|
||||
"--keep-weekly 7"
|
||||
|
@ -64,17 +66,18 @@ in {
|
|||
mode = "640";
|
||||
};
|
||||
services.maddy.ensureCredentials = {
|
||||
"vaultwarden@${config.secrets.secrets.global.domains.mail_public}".passwordFile = nodes.maddy.config.age.secrets.vaultwardenPasswd.path;
|
||||
"vaultwarden@${config.secrets.secrets.global.domains.mail_public}".passwordFile =
|
||||
nodes.maddy.config.age.secrets.vaultwardenPasswd.path;
|
||||
};
|
||||
};
|
||||
system.activationScripts.systemd_env_smtp_passwd = {
|
||||
text = ''
|
||||
echo "SMTP_PASSWORD=$(< ${lib.escapeShellArg config.age.secrets.maddyPasswd.path})" > /run/vaultwarden_smtp_passwd
|
||||
'';
|
||||
deps = ["agenix"];
|
||||
deps = [ "agenix" ];
|
||||
};
|
||||
|
||||
systemd.services.vaultwarden.serviceConfig.EnvironmentFile = ["/run/vaultwarden_smtp_passwd"];
|
||||
systemd.services.vaultwarden.serviceConfig.EnvironmentFile = [ "/run/vaultwarden_smtp_passwd" ];
|
||||
|
||||
services.vaultwarden = {
|
||||
enable = true;
|
||||
|
@ -107,7 +110,7 @@ in {
|
|||
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [config.services.vaultwarden.config.rocketPort];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [ config.services.vaultwarden.config.rocketPort ];
|
||||
};
|
||||
|
||||
# Replace uses of old name
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
{ config, pkgs, ... }:
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
wireguard.elisabeth = {
|
||||
client.via = "elisabeth";
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [3000 80];
|
||||
firewallRuleForNode.elisabeth.allowedTCPPorts = [
|
||||
3000
|
||||
80
|
||||
];
|
||||
};
|
||||
age.secrets.spotifySecret = {
|
||||
owner = "root";
|
||||
|
|
82
flake.nix
|
@ -101,27 +101,29 @@
|
|||
};
|
||||
};
|
||||
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
agenix-rekey,
|
||||
nixos-generators,
|
||||
pre-commit-hooks,
|
||||
devshell,
|
||||
nixvim,
|
||||
nixos-extra-modules,
|
||||
nix-topology,
|
||||
...
|
||||
} @ inputs: let
|
||||
inherit (nixpkgs) lib;
|
||||
stateVersion = "23.05";
|
||||
in
|
||||
outputs =
|
||||
{
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
agenix-rekey,
|
||||
nixos-generators,
|
||||
pre-commit-hooks,
|
||||
devshell,
|
||||
nixvim,
|
||||
nixos-extra-modules,
|
||||
nix-topology,
|
||||
...
|
||||
}@inputs:
|
||||
let
|
||||
inherit (nixpkgs) lib;
|
||||
stateVersion = "23.05";
|
||||
in
|
||||
{
|
||||
secretsConfig = {
|
||||
# This should be a link to one of the age public keys in './keys'
|
||||
masterIdentities = [./keys/PatC.pub];
|
||||
extraEncryptionPubkeys = [./secrets/recipients.txt];
|
||||
masterIdentities = [ ./keys/PatC.pub ];
|
||||
extraEncryptionPubkeys = [ ./secrets/recipients.txt ];
|
||||
};
|
||||
agenix-rekey = agenix-rekey.configure {
|
||||
userFlake = self;
|
||||
|
@ -129,8 +131,7 @@
|
|||
};
|
||||
|
||||
inherit stateVersion;
|
||||
inherit
|
||||
(import ./nix/hosts.nix inputs)
|
||||
inherit (import ./nix/hosts.nix inputs)
|
||||
hosts
|
||||
nixosConfigurations
|
||||
minimalConfigurations
|
||||
|
@ -139,19 +140,20 @@
|
|||
nodes = self.nixosConfigurations // self.guestConfigurations;
|
||||
|
||||
inherit
|
||||
(lib.foldl' lib.recursiveUpdate {}
|
||||
(lib.mapAttrsToList
|
||||
(import ./nix/generate-installer-package.nix inputs)
|
||||
self.minimalConfigurations))
|
||||
(lib.foldl' lib.recursiveUpdate { } (
|
||||
lib.mapAttrsToList (import ./nix/generate-installer-package.nix inputs) self.minimalConfigurations
|
||||
))
|
||||
packages
|
||||
;
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem (system: rec {
|
||||
apps.setupHetznerStorageBoxes = import (nixos-extra-modules + "/apps/setup-hetzner-storage-boxes.nix") {
|
||||
inherit pkgs;
|
||||
nixosConfigurations = self.nodes;
|
||||
decryptIdentity = builtins.head self.secretsConfig.masterIdentities;
|
||||
};
|
||||
apps.setupHetznerStorageBoxes =
|
||||
import (nixos-extra-modules + "/apps/setup-hetzner-storage-boxes.nix")
|
||||
{
|
||||
inherit pkgs;
|
||||
nixosConfigurations = self.nodes;
|
||||
decryptIdentity = builtins.head self.secretsConfig.masterIdentities;
|
||||
};
|
||||
pkgs = import nixpkgs {
|
||||
overlays =
|
||||
import ./lib inputs
|
||||
|
@ -172,7 +174,7 @@
|
|||
topology = import nix-topology {
|
||||
inherit pkgs;
|
||||
modules = [
|
||||
{inherit (self) nixosConfigurations;}
|
||||
{ inherit (self) nixosConfigurations; }
|
||||
./nix/topology.nix
|
||||
];
|
||||
};
|
||||
|
@ -191,19 +193,17 @@
|
|||
.${system};
|
||||
};
|
||||
|
||||
checks.pre-commit-check =
|
||||
pre-commit-hooks.lib.${system}.run
|
||||
{
|
||||
src = lib.cleanSource ./.;
|
||||
hooks = {
|
||||
nixfmt = {
|
||||
enable = true;
|
||||
package = pkgs.nixfmt-rfc-style;
|
||||
};
|
||||
deadnix.enable = true;
|
||||
statix.enable = true;
|
||||
checks.pre-commit-check = pre-commit-hooks.lib.${system}.run {
|
||||
src = lib.cleanSource ./.;
|
||||
hooks = {
|
||||
nixfmt = {
|
||||
enable = true;
|
||||
package = pkgs.nixfmt-rfc-style;
|
||||
};
|
||||
deadnix.enable = true;
|
||||
statix.enable = true;
|
||||
};
|
||||
};
|
||||
devShell = import ./nix/devshell.nix inputs system;
|
||||
formatter = pkgs.nixfmt-rfc-style;
|
||||
});
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{ inputs, lib, ... }:
|
||||
{
|
||||
inputs,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
inputs.nixos-hardware.nixosModules.common-gpu-nvidia-nonprime
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-intel-cpu-only
|
||||
|
@ -51,9 +48,23 @@
|
|||
device = "/dev/input/event15";
|
||||
};
|
||||
|
||||
boot.binfmt.emulatedSystems = ["aarch64-linux" "riscv64-linux"];
|
||||
nix.settings.system-features = ["kvm" "nixos-test"];
|
||||
boot.kernelParams = lib.mkForce ["rd.luks.options=timeout=0" "rootflags=x-systemd.device-timeout=0" "nohibernate" "root=fstab" "loglevel=4" "nvidia-drm.modeset=1" "nvidia.NVreg_PreserveVideoMemoryAllocations=1"];
|
||||
boot.binfmt.emulatedSystems = [
|
||||
"aarch64-linux"
|
||||
"riscv64-linux"
|
||||
];
|
||||
nix.settings.system-features = [
|
||||
"kvm"
|
||||
"nixos-test"
|
||||
];
|
||||
boot.kernelParams = lib.mkForce [
|
||||
"rd.luks.options=timeout=0"
|
||||
"rootflags=x-systemd.device-timeout=0"
|
||||
"nohibernate"
|
||||
"root=fstab"
|
||||
"loglevel=4"
|
||||
"nvidia-drm.modeset=1"
|
||||
"nvidia.NVreg_PreserveVideoMemoryAllocations=1"
|
||||
];
|
||||
|
||||
services.netbird.enable = true;
|
||||
# Do not clean up the nix store, to prevent having to rebuild packages once a month
|
||||
|
|
|
@ -3,7 +3,8 @@
|
|||
nodes,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
disko.devices = {
|
||||
disk = {
|
||||
m2-ssd = rec {
|
||||
|
@ -12,9 +13,15 @@
|
|||
content = with lib.disko.gpt; {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
boot = (partEfi "2GiB") // {device = "${device}-part1";};
|
||||
swap = (partSwap "16G") // {device = "${device}-part2";};
|
||||
rpool = (partLuksZfs "m2-ssd" "rpool" "100%") // {device = "${device}-part3";};
|
||||
boot = (partEfi "2GiB") // {
|
||||
device = "${device}-part1";
|
||||
};
|
||||
swap = (partSwap "16G") // {
|
||||
device = "${device}-part2";
|
||||
};
|
||||
rpool = (partLuksZfs "m2-ssd" "rpool" "100%") // {
|
||||
device = "${device}-part3";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -24,13 +31,15 @@
|
|||
content = with lib.disko.gpt; {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
panzer = (partLuksZfs "sata-hdd" "panzer" "100%") // {device = "${device}-part1";};
|
||||
panzer = (partLuksZfs "sata-hdd" "panzer" "100%") // {
|
||||
device = "${device}-part1";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
zpool = with lib.disko.zfs; {
|
||||
rpool = mkZpool {datasets = impermanenceZfsDatasets;};
|
||||
rpool = mkZpool { datasets = impermanenceZfsDatasets; };
|
||||
panzer = mkZpool {
|
||||
datasets = {
|
||||
"local" = unmountable;
|
||||
|
@ -42,8 +51,8 @@
|
|||
fileSystems."/state".neededForBoot = true;
|
||||
fileSystems."/persist".neededForBoot = true;
|
||||
fileSystems."/panzer/state".neededForBoot = true;
|
||||
boot.initrd.systemd.services."zfs-import-panzer".after = ["cryptsetup.target"];
|
||||
boot.initrd.systemd.services."zfs-import-rpool".after = ["cryptsetup.target"];
|
||||
boot.initrd.systemd.services."zfs-import-panzer".after = [ "cryptsetup.target" ];
|
||||
boot.initrd.systemd.services."zfs-import-rpool".after = [ "cryptsetup.target" ];
|
||||
|
||||
wireguard.scrtiny-patrick.client.via = "elisabeth";
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
networking = {
|
||||
inherit (config.secrets.secrets.local.networking) hostId;
|
||||
};
|
||||
|
@ -20,6 +21,6 @@
|
|||
};
|
||||
};
|
||||
};
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = ["lan01"];
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [ "lan01" ];
|
||||
wireguard.samba-patrick.client.via = "elisabeth-samba";
|
||||
}
|
||||
|
|
|
@ -3,29 +3,26 @@
|
|||
minimal,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
imports =
|
||||
[
|
||||
inputs.nixos-hardware.nixosModules.common-pc
|
||||
inputs.nixos-hardware.nixosModules.common-pc-ssd
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd-pstate
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
inputs.nixos-hardware.nixosModules.common-pc
|
||||
inputs.nixos-hardware.nixosModules.common-pc-ssd
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd-pstate
|
||||
|
||||
../../config/basic
|
||||
../../config/basic
|
||||
|
||||
../../config/optional/initrd-ssh.nix
|
||||
../../config/optional/secureboot.nix
|
||||
../../config/optional/zfs.nix
|
||||
../../config/optional/initrd-ssh.nix
|
||||
../../config/optional/secureboot.nix
|
||||
../../config/optional/zfs.nix
|
||||
|
||||
../../config/hardware/physical.nix
|
||||
../../config/hardware/physical.nix
|
||||
|
||||
./blog.nix
|
||||
./net.nix
|
||||
./fs.nix
|
||||
]
|
||||
++ lib.lists.optionals (!minimal) [
|
||||
./guests.nix
|
||||
];
|
||||
./blog.nix
|
||||
./net.nix
|
||||
./fs.nix
|
||||
] ++ lib.lists.optionals (!minimal) [ ./guests.nix ];
|
||||
services.xserver = {
|
||||
xkb = {
|
||||
layout = "de";
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
disko.devices = {
|
||||
disk = {
|
||||
internal-ssd = rec {
|
||||
|
@ -11,8 +8,12 @@
|
|||
content = with lib.disko.gpt; {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
boot = (partEfi "1GiB") // {device = "${device}-part1";};
|
||||
rpool = (partLuksZfs "ssd" "rpool" "100%") // {device = "${device}-part2";};
|
||||
boot = (partEfi "1GiB") // {
|
||||
device = "${device}-part1";
|
||||
};
|
||||
rpool = (partLuksZfs "ssd" "rpool" "100%") // {
|
||||
device = "${device}-part2";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -49,7 +50,7 @@
|
|||
};
|
||||
|
||||
zpool = with lib.disko.zfs; {
|
||||
rpool = mkZpool {datasets = impermanenceZfsDatasets;};
|
||||
rpool = mkZpool { datasets = impermanenceZfsDatasets; };
|
||||
panzer = mkZpool {
|
||||
datasets = {
|
||||
"safe/guests" = unmountable;
|
||||
|
@ -128,10 +129,13 @@
|
|||
wireguard.scrtiny-patrick.server = {
|
||||
host = config.secrets.secrets.global.domains.web;
|
||||
port = 51831;
|
||||
reservedAddresses = ["10.44.0.0/16" "fd00:1766::/112"];
|
||||
reservedAddresses = [
|
||||
"10.44.0.0/16"
|
||||
"fd00:1766::/112"
|
||||
];
|
||||
openFirewall = true;
|
||||
};
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = ["scrtiny-patrick"];
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [ "scrtiny-patrick" ];
|
||||
services.scrutiny = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
|
@ -156,6 +160,6 @@
|
|||
|
||||
fileSystems."/state".neededForBoot = true;
|
||||
fileSystems."/persist".neededForBoot = true;
|
||||
boot.initrd.systemd.services."zfs-import-panzer".after = ["cryptsetup.target"];
|
||||
boot.initrd.systemd.services."zfs-import-renaultft".after = ["cryptsetup.target"];
|
||||
boot.initrd.systemd.services."zfs-import-panzer".after = [ "cryptsetup.target" ];
|
||||
boot.initrd.systemd.services."zfs-import-renaultft".after = [ "cryptsetup.target" ];
|
||||
}
|
||||
|
|
|
@ -6,122 +6,136 @@
|
|||
minimal,
|
||||
nodes,
|
||||
...
|
||||
}: let
|
||||
domainOf = hostName: let
|
||||
domains = {
|
||||
adguardhome = "adguardhome";
|
||||
forgejo = "forge";
|
||||
immich = "immich";
|
||||
nextcloud = "nc";
|
||||
ollama = "ai";
|
||||
paperless = "ppl";
|
||||
ttrss = "rss";
|
||||
vaultwarden = "pw";
|
||||
yourspotify = "sptfy";
|
||||
apispotify = "apisptfy";
|
||||
kanidm = "auth";
|
||||
oauth2-proxy = "oauth2";
|
||||
netbird = "netbird";
|
||||
actual = "actual";
|
||||
firefly = "money";
|
||||
homebox = "homebox";
|
||||
octoprint = "print";
|
||||
pr-tracker = "tracker";
|
||||
};
|
||||
in "${domains.${hostName}}.${config.secrets.secrets.global.domains.web}";
|
||||
}:
|
||||
let
|
||||
domainOf =
|
||||
hostName:
|
||||
let
|
||||
domains = {
|
||||
adguardhome = "adguardhome";
|
||||
forgejo = "forge";
|
||||
immich = "immich";
|
||||
nextcloud = "nc";
|
||||
ollama = "ai";
|
||||
paperless = "ppl";
|
||||
ttrss = "rss";
|
||||
vaultwarden = "pw";
|
||||
yourspotify = "sptfy";
|
||||
apispotify = "apisptfy";
|
||||
kanidm = "auth";
|
||||
oauth2-proxy = "oauth2";
|
||||
netbird = "netbird";
|
||||
actual = "actual";
|
||||
firefly = "money";
|
||||
homebox = "homebox";
|
||||
octoprint = "print";
|
||||
pr-tracker = "tracker";
|
||||
};
|
||||
in
|
||||
"${domains.${hostName}}.${config.secrets.secrets.global.domains.web}";
|
||||
# TODO hard-coded elisabeth, not so nice
|
||||
ipOf = hostName:
|
||||
if nodes ? ${hostName}
|
||||
then nodes.${hostName}.config.wireguard.elisabeth.ipv4
|
||||
else nodes."elisabeth-${hostName}".config.wireguard.elisabeth.ipv4;
|
||||
in {
|
||||
services.nginx = let
|
||||
blockOf = hostName: {
|
||||
virtualHostExtraConfig ? "",
|
||||
maxBodySize ? "500M",
|
||||
port ? 3000,
|
||||
upstream ? hostName,
|
||||
protocol ? "http",
|
||||
}: {
|
||||
upstreams.${hostName} = {
|
||||
servers."${ipOf upstream}:${toString port}" = {};
|
||||
extraConfig = ''
|
||||
zone ${hostName} 64k ;
|
||||
keepalive 5 ;
|
||||
'';
|
||||
};
|
||||
virtualHosts.${domainOf hostName} = {
|
||||
forceSSL = true;
|
||||
useACMEHost = "web";
|
||||
locations."/" = {
|
||||
proxyPass = "${protocol}://${hostName}";
|
||||
proxyWebsockets = true;
|
||||
X-Frame-Options = "SAMEORIGIN";
|
||||
};
|
||||
extraConfig =
|
||||
''
|
||||
client_max_body_size ${maxBodySize} ;
|
||||
''
|
||||
+ virtualHostExtraConfig;
|
||||
};
|
||||
};
|
||||
proxyProtect = hostName: cfg: allowedGroup:
|
||||
lib.mkMerge [
|
||||
(blockOf hostName cfg)
|
||||
ipOf =
|
||||
hostName:
|
||||
if nodes ? ${hostName} then
|
||||
nodes.${hostName}.config.wireguard.elisabeth.ipv4
|
||||
else
|
||||
nodes."elisabeth-${hostName}".config.wireguard.elisabeth.ipv4;
|
||||
in
|
||||
{
|
||||
services.nginx =
|
||||
let
|
||||
blockOf =
|
||||
hostName:
|
||||
{
|
||||
virtualHosts.${domainOf hostName} = {
|
||||
locations."/".extraConfig = ''
|
||||
auth_request /oauth2/auth;
|
||||
error_page 401 = /oauth2/sign_in;
|
||||
|
||||
# pass information via X-User and X-Email headers to backend,
|
||||
# requires running with --set-xauthrequest flag
|
||||
auth_request_set $user $upstream_http_x_auth_request_preferred_username;
|
||||
# Set the email to our own domain in case users change their mail
|
||||
auth_request_set $email "''${upstream_http_x_auth_request_preferred_username}@${config.secrets.secrets.global.domains.web}";
|
||||
proxy_set_header X-User $user;
|
||||
proxy_set_header X-Email $email;
|
||||
|
||||
# if you enabled --cookie-refresh, this is needed for it to work with auth_request
|
||||
auth_request_set $auth_cookie $upstream_http_set_cookie;
|
||||
add_header Set-Cookie $auth_cookie;
|
||||
virtualHostExtraConfig ? "",
|
||||
maxBodySize ? "500M",
|
||||
port ? 3000,
|
||||
upstream ? hostName,
|
||||
protocol ? "http",
|
||||
}:
|
||||
{
|
||||
upstreams.${hostName} = {
|
||||
servers."${ipOf upstream}:${toString port}" = { };
|
||||
extraConfig = ''
|
||||
zone ${hostName} 64k ;
|
||||
keepalive 5 ;
|
||||
'';
|
||||
locations."/oauth2/" = {
|
||||
proxyPass = "http://oauth2-proxy";
|
||||
extraConfig = ''
|
||||
proxy_set_header X-Scheme $scheme;
|
||||
proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
|
||||
'';
|
||||
};
|
||||
|
||||
locations."= /oauth2/auth" = {
|
||||
proxyPass = "http://oauth2-proxy/oauth2/auth" + lib.optionalString allowedGroup "?allowed_groups=${hostName}_access";
|
||||
extraConfig = ''
|
||||
internal;
|
||||
|
||||
proxy_set_header X-Scheme $scheme;
|
||||
# nginx auth_request includes headers but not body
|
||||
proxy_set_header Content-Length "";
|
||||
proxy_pass_request_body off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
in
|
||||
virtualHosts.${domainOf hostName} = {
|
||||
forceSSL = true;
|
||||
useACMEHost = "web";
|
||||
locations."/" = {
|
||||
proxyPass = "${protocol}://${hostName}";
|
||||
proxyWebsockets = true;
|
||||
X-Frame-Options = "SAMEORIGIN";
|
||||
};
|
||||
extraConfig =
|
||||
''
|
||||
client_max_body_size ${maxBodySize} ;
|
||||
''
|
||||
+ virtualHostExtraConfig;
|
||||
};
|
||||
};
|
||||
proxyProtect =
|
||||
hostName: cfg: allowedGroup:
|
||||
lib.mkMerge [
|
||||
(blockOf hostName cfg)
|
||||
{
|
||||
virtualHosts.${domainOf hostName} = {
|
||||
locations."/".extraConfig = ''
|
||||
auth_request /oauth2/auth;
|
||||
error_page 401 = /oauth2/sign_in;
|
||||
|
||||
# pass information via X-User and X-Email headers to backend,
|
||||
# requires running with --set-xauthrequest flag
|
||||
auth_request_set $user $upstream_http_x_auth_request_preferred_username;
|
||||
# Set the email to our own domain in case users change their mail
|
||||
auth_request_set $email "''${upstream_http_x_auth_request_preferred_username}@${config.secrets.secrets.global.domains.web}";
|
||||
proxy_set_header X-User $user;
|
||||
proxy_set_header X-Email $email;
|
||||
|
||||
# if you enabled --cookie-refresh, this is needed for it to work with auth_request
|
||||
auth_request_set $auth_cookie $upstream_http_set_cookie;
|
||||
add_header Set-Cookie $auth_cookie;
|
||||
'';
|
||||
locations."/oauth2/" = {
|
||||
proxyPass = "http://oauth2-proxy";
|
||||
extraConfig = ''
|
||||
proxy_set_header X-Scheme $scheme;
|
||||
proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
|
||||
'';
|
||||
};
|
||||
|
||||
locations."= /oauth2/auth" = {
|
||||
proxyPass =
|
||||
"http://oauth2-proxy/oauth2/auth"
|
||||
+ lib.optionalString allowedGroup "?allowed_groups=${hostName}_access";
|
||||
extraConfig = ''
|
||||
internal;
|
||||
|
||||
proxy_set_header X-Scheme $scheme;
|
||||
# nginx auth_request includes headers but not body
|
||||
proxy_set_header Content-Length "";
|
||||
proxy_pass_request_body off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
in
|
||||
lib.mkMerge [
|
||||
{
|
||||
enable = true;
|
||||
recommendedSetup = true;
|
||||
upstreams.netbird = {
|
||||
servers."${ipOf "netbird"}:80" = {};
|
||||
servers."${ipOf "netbird"}:80" = { };
|
||||
extraConfig = ''
|
||||
zone netbird 64k ;
|
||||
keepalive 5 ;
|
||||
'';
|
||||
};
|
||||
upstreams.netbird-mgmt = {
|
||||
servers."${ipOf "netbird"}:3000" = {};
|
||||
servers."${ipOf "netbird"}:3000" = { };
|
||||
extraConfig = ''
|
||||
zone netbird 64k ;
|
||||
keepalive 5 ;
|
||||
|
@ -159,16 +173,16 @@ in {
|
|||
'';
|
||||
};
|
||||
}
|
||||
(blockOf "vaultwarden" {maxBodySize = "1G";})
|
||||
(blockOf "forgejo" {maxBodySize = "1G";})
|
||||
(blockOf "immich" {maxBodySize = "5G";})
|
||||
(proxyProtect "adguardhome" {} true)
|
||||
(proxyProtect "oauth2-proxy" {} false)
|
||||
(blockOf "paperless" {maxBodySize = "5G";})
|
||||
(proxyProtect "ttrss" {port = 80;} true)
|
||||
(blockOf "yourspotify" {port = 80;})
|
||||
(blockOf "vaultwarden" { maxBodySize = "1G"; })
|
||||
(blockOf "forgejo" { maxBodySize = "1G"; })
|
||||
(blockOf "immich" { maxBodySize = "5G"; })
|
||||
(proxyProtect "adguardhome" { } true)
|
||||
(proxyProtect "oauth2-proxy" { } false)
|
||||
(blockOf "paperless" { maxBodySize = "5G"; })
|
||||
(proxyProtect "ttrss" { port = 80; } true)
|
||||
(blockOf "yourspotify" { port = 80; })
|
||||
#(blockOf "homebox" {})
|
||||
(blockOf "pr-tracker" {})
|
||||
(blockOf "pr-tracker" { })
|
||||
{
|
||||
virtualHosts.${domainOf "pr-tracker"} = {
|
||||
locations."/update" = {
|
||||
|
@ -176,9 +190,9 @@ in {
|
|||
};
|
||||
};
|
||||
}
|
||||
(proxyProtect "ollama" {} true)
|
||||
(proxyProtect "octoprint" {} true)
|
||||
(proxyProtect "firefly" {port = 80;} true)
|
||||
(proxyProtect "ollama" { } true)
|
||||
(proxyProtect "octoprint" { } true)
|
||||
(proxyProtect "firefly" { port = 80; } true)
|
||||
(blockOf "apispotify" {
|
||||
port = 3000;
|
||||
upstream = "yourspotify";
|
||||
|
@ -187,71 +201,76 @@ in {
|
|||
maxBodySize = "5G";
|
||||
port = 80;
|
||||
})
|
||||
(blockOf "kanidm"
|
||||
{
|
||||
protocol = "https";
|
||||
virtualHostExtraConfig = ''
|
||||
proxy_ssl_verify off ;
|
||||
'';
|
||||
})
|
||||
(blockOf "kanidm" {
|
||||
protocol = "https";
|
||||
virtualHostExtraConfig = ''
|
||||
proxy_ssl_verify off ;
|
||||
'';
|
||||
})
|
||||
];
|
||||
|
||||
guests = let
|
||||
mkGuest = guestName: {
|
||||
enablePanzer ? false,
|
||||
enableRenaultFT ? false,
|
||||
enableBunker ? false,
|
||||
enableSharedPaperless ? false,
|
||||
...
|
||||
}: {
|
||||
autostart = true;
|
||||
zfs."/state" = {
|
||||
pool = "rpool";
|
||||
dataset = "local/guests/${guestName}";
|
||||
};
|
||||
zfs."/persist" = {
|
||||
pool = "rpool";
|
||||
dataset = "safe/guests/${guestName}";
|
||||
};
|
||||
zfs."/panzer" = lib.mkIf enablePanzer {
|
||||
pool = "panzer";
|
||||
dataset = "safe/guests/${guestName}";
|
||||
};
|
||||
zfs."/renaultft" = lib.mkIf enableRenaultFT {
|
||||
pool = "renaultft";
|
||||
dataset = "safe/guests/${guestName}";
|
||||
};
|
||||
# kinda not necessary, should be removed on next reimaging
|
||||
zfs."/bunker" = lib.mkIf enableBunker {
|
||||
pool = "panzer";
|
||||
dataset = "bunker/guests/${guestName}";
|
||||
};
|
||||
zfs."/paperless" = lib.mkIf enableSharedPaperless {
|
||||
pool = "panzer";
|
||||
dataset = "bunker/shared/paperless";
|
||||
};
|
||||
modules = [
|
||||
../../config/basic
|
||||
../../config/services/${guestName}.nix
|
||||
guests =
|
||||
let
|
||||
mkGuest =
|
||||
guestName:
|
||||
{
|
||||
node.secretsDir = config.node.secretsDir + "/${guestName}";
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [config.guests.${guestName}.networking.mainLinkName];
|
||||
systemd.network.networks."10-${config.guests.${guestName}.networking.mainLinkName}" = {
|
||||
DHCP = lib.mkForce "no";
|
||||
address = [
|
||||
(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips."${config.guests.${guestName}.nodeName}" config.secrets.secrets.global.net.privateSubnetv4)
|
||||
(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips."${config.guests.${guestName}.nodeName}" config.secrets.secrets.global.net.privateSubnetv6)
|
||||
];
|
||||
gateway = [(lib.net.cidr.host 1 config.secrets.secrets.global.net.privateSubnetv4)];
|
||||
enablePanzer ? false,
|
||||
enableRenaultFT ? false,
|
||||
enableBunker ? false,
|
||||
enableSharedPaperless ? false,
|
||||
...
|
||||
}:
|
||||
{
|
||||
autostart = true;
|
||||
zfs."/state" = {
|
||||
pool = "rpool";
|
||||
dataset = "local/guests/${guestName}";
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
zfs."/persist" = {
|
||||
pool = "rpool";
|
||||
dataset = "safe/guests/${guestName}";
|
||||
};
|
||||
zfs."/panzer" = lib.mkIf enablePanzer {
|
||||
pool = "panzer";
|
||||
dataset = "safe/guests/${guestName}";
|
||||
};
|
||||
zfs."/renaultft" = lib.mkIf enableRenaultFT {
|
||||
pool = "renaultft";
|
||||
dataset = "safe/guests/${guestName}";
|
||||
};
|
||||
# kinda not necessary, should be removed on next reimaging
|
||||
zfs."/bunker" = lib.mkIf enableBunker {
|
||||
pool = "panzer";
|
||||
dataset = "bunker/guests/${guestName}";
|
||||
};
|
||||
zfs."/paperless" = lib.mkIf enableSharedPaperless {
|
||||
pool = "panzer";
|
||||
dataset = "bunker/shared/paperless";
|
||||
};
|
||||
modules = [
|
||||
../../config/basic
|
||||
../../config/services/${guestName}.nix
|
||||
{
|
||||
node.secretsDir = config.node.secretsDir + "/${guestName}";
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [
|
||||
config.guests.${guestName}.networking.mainLinkName
|
||||
];
|
||||
systemd.network.networks."10-${config.guests.${guestName}.networking.mainLinkName}" = {
|
||||
DHCP = lib.mkForce "no";
|
||||
address = [
|
||||
(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips."${config.guests.${guestName}.nodeName
|
||||
}" config.secrets.secrets.global.net.privateSubnetv4)
|
||||
(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips."${config.guests.${guestName}.nodeName
|
||||
}" config.secrets.secrets.global.net.privateSubnetv6)
|
||||
];
|
||||
gateway = [ (lib.net.cidr.host 1 config.secrets.secrets.global.net.privateSubnetv4) ];
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
mkMicrovm = guestName: cfg: {
|
||||
${guestName} =
|
||||
mkGuest guestName cfg
|
||||
// {
|
||||
mkMicrovm = guestName: cfg: {
|
||||
${guestName} = mkGuest guestName cfg // {
|
||||
backend = "microvm";
|
||||
microvm = {
|
||||
system = "x86_64-linux";
|
||||
|
@ -264,46 +283,42 @@ in {
|
|||
inherit inputs minimal stateVersion;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mkContainer = guestName: cfg: {
|
||||
${guestName} =
|
||||
mkGuest guestName cfg
|
||||
// {
|
||||
mkContainer = guestName: cfg: {
|
||||
${guestName} = mkGuest guestName cfg // {
|
||||
backend = "container";
|
||||
container.macvlan = "lan";
|
||||
extraSpecialArgs = {
|
||||
inherit lib nodes inputs minimal stateVersion;
|
||||
inherit
|
||||
lib
|
||||
nodes
|
||||
inputs
|
||||
minimal
|
||||
stateVersion
|
||||
;
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{}
|
||||
// mkContainer "adguardhome" {}
|
||||
// mkContainer "oauth2-proxy" {}
|
||||
// mkContainer "vaultwarden" {}
|
||||
// mkContainer "ddclient" {}
|
||||
// mkContainer "ollama" {}
|
||||
// mkContainer "murmur" {}
|
||||
};
|
||||
in
|
||||
{ }
|
||||
// mkContainer "adguardhome" { }
|
||||
// mkContainer "oauth2-proxy" { }
|
||||
// mkContainer "vaultwarden" { }
|
||||
// mkContainer "ddclient" { }
|
||||
// mkContainer "ollama" { }
|
||||
// mkContainer "murmur" { }
|
||||
#// mkContainer "homebox" {}
|
||||
// mkContainer "pr-tracker" {}
|
||||
// mkContainer "ttrss" {}
|
||||
// mkContainer "firefly" {}
|
||||
// mkContainer "yourspotify" {}
|
||||
// mkContainer "netbird" {}
|
||||
// mkContainer "kanidm" {}
|
||||
// mkContainer "nextcloud" {
|
||||
enablePanzer = true;
|
||||
}
|
||||
// mkContainer "paperless" {
|
||||
enableSharedPaperless = true;
|
||||
}
|
||||
// mkContainer "forgejo" {
|
||||
enablePanzer = true;
|
||||
}
|
||||
// mkMicrovm "immich" {
|
||||
enablePanzer = true;
|
||||
}
|
||||
// mkContainer "pr-tracker" { }
|
||||
// mkContainer "ttrss" { }
|
||||
// mkContainer "firefly" { }
|
||||
// mkContainer "yourspotify" { }
|
||||
// mkContainer "netbird" { }
|
||||
// mkContainer "kanidm" { }
|
||||
// mkContainer "nextcloud" { enablePanzer = true; }
|
||||
// mkContainer "paperless" { enableSharedPaperless = true; }
|
||||
// mkContainer "forgejo" { enablePanzer = true; }
|
||||
// mkMicrovm "immich" { enablePanzer = true; }
|
||||
// mkContainer "samba" {
|
||||
enablePanzer = true;
|
||||
enableRenaultFT = true;
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
networking = {
|
||||
inherit (config.secrets.secrets.local.networking) hostId;
|
||||
};
|
||||
systemd.network.networks = {
|
||||
"10-lan01" = {
|
||||
address = [(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips.${config.node.name} config.secrets.secrets.global.net.privateSubnetv4)];
|
||||
gateway = [(lib.net.cidr.host 1 config.secrets.secrets.global.net.privateSubnetv4)];
|
||||
address = [
|
||||
(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips.${config.node.name}
|
||||
config.secrets.secrets.global.net.privateSubnetv4
|
||||
)
|
||||
];
|
||||
gateway = [ (lib.net.cidr.host 1 config.secrets.secrets.global.net.privateSubnetv4) ];
|
||||
#matchConfig.MACAddress = config.secrets.secrets.local.networking.interfaces.lan01.mac;
|
||||
matchConfig.Name = "lan";
|
||||
dhcpV6Config.UseDNS = false;
|
||||
|
@ -33,8 +34,12 @@
|
|||
networks = {
|
||||
# redo the network because the live system has macvlans
|
||||
"10-lan01" = {
|
||||
address = [(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips.${config.node.name} config.secrets.secrets.global.net.privateSubnetv4)];
|
||||
gateway = [(lib.net.cidr.host 1 config.secrets.secrets.global.net.privateSubnetv4)];
|
||||
address = [
|
||||
(lib.net.cidr.hostCidr config.secrets.secrets.global.net.ips.${config.node.name}
|
||||
config.secrets.secrets.global.net.privateSubnetv4
|
||||
)
|
||||
];
|
||||
gateway = [ (lib.net.cidr.host 1 config.secrets.secrets.global.net.privateSubnetv4) ];
|
||||
matchConfig.MACAddress = config.secrets.secrets.local.networking.interfaces.lan01.mac;
|
||||
dhcpV6Config.UseDNS = false;
|
||||
dhcpV4Config.UseDNS = false;
|
||||
|
@ -46,11 +51,16 @@
|
|||
};
|
||||
};
|
||||
};
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = ["lan"];
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [ "lan" ];
|
||||
|
||||
wireguard.elisabeth.server = {
|
||||
host = lib.net.cidr.host config.secrets.secrets.global.net.ips.${config.node.name} config.secrets.secrets.global.net.privateSubnetv4;
|
||||
reservedAddresses = ["10.42.0.0/20" "fd00:1764::/112"];
|
||||
host =
|
||||
lib.net.cidr.host config.secrets.secrets.global.net.ips.${config.node.name}
|
||||
config.secrets.secrets.global.net.privateSubnetv4;
|
||||
reservedAddresses = [
|
||||
"10.42.0.0/20"
|
||||
"fd00:1764::/112"
|
||||
];
|
||||
openFirewall = true;
|
||||
};
|
||||
# To be able to ping containers from the host, it is necessary
|
||||
|
@ -71,7 +81,7 @@
|
|||
email = config.secrets.secrets.global.devEmail;
|
||||
dnsProvider = "cloudflare";
|
||||
dnsPropagationCheck = true;
|
||||
reloadServices = ["nginx"];
|
||||
reloadServices = [ "nginx" ];
|
||||
credentialFiles = {
|
||||
"CF_DNS_API_TOKEN_FILE" = config.age.secrets.cloudflare_token_acme.path;
|
||||
"CF_ZONE_API_TOKEN_FILE" = config.age.secrets.cloudflare_token_acme.path;
|
||||
|
@ -80,9 +90,9 @@
|
|||
};
|
||||
security.acme.certs.web = {
|
||||
domain = config.secrets.secrets.global.domains.web;
|
||||
extraDomainNames = ["*.${config.secrets.secrets.global.domains.web}"];
|
||||
extraDomainNames = [ "*.${config.secrets.secrets.global.domains.web}" ];
|
||||
};
|
||||
users.groups.acme.members = ["nginx"];
|
||||
users.groups.acme.members = [ "nginx" ];
|
||||
environment.persistence."/state".directories = [
|
||||
{
|
||||
directory = "/var/lib/acme";
|
||||
|
|
|
@ -9,6 +9,11 @@
|
|||
./fs.nix
|
||||
];
|
||||
boot.mode = "bios";
|
||||
boot.initrd.availableKernelModules = ["virtio_pci" "virtio_net" "virtio_scsi" "virtio_blk"];
|
||||
boot.initrd.availableKernelModules = [
|
||||
"virtio_pci"
|
||||
"virtio_net"
|
||||
"virtio_scsi"
|
||||
"virtio_blk"
|
||||
];
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
}
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
disko.devices = {
|
||||
disk = {
|
||||
drive = rec {
|
||||
|
@ -11,9 +8,15 @@
|
|||
content = with lib.disko.gpt; {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
grub = partGrub // {device = "${device}-part1";};
|
||||
bios = (partEfi "512MiB") // {device = "${device}-part2";};
|
||||
rpool = (partLuksZfs "rpool" "rpool" "100%") // {device = "${device}-part3";};
|
||||
grub = partGrub // {
|
||||
device = "${device}-part1";
|
||||
};
|
||||
bios = (partEfi "512MiB") // {
|
||||
device = "${device}-part2";
|
||||
};
|
||||
rpool = (partLuksZfs "rpool" "rpool" "100%") // {
|
||||
device = "${device}-part3";
|
||||
};
|
||||
#(lib.attrsets.recursiveUpdate (partLuksZfs "rpool" "rpool" "17GiB" "100%") {content.extraFormatArgs = ["--pbkdf pbkdf2"];})
|
||||
};
|
||||
};
|
||||
|
@ -21,13 +24,11 @@
|
|||
};
|
||||
|
||||
zpool = with lib.disko.zfs; {
|
||||
rpool = mkZpool {datasets = impermanenceZfsDatasets;};
|
||||
rpool = mkZpool { datasets = impermanenceZfsDatasets; };
|
||||
};
|
||||
};
|
||||
|
||||
fileSystems."/state".neededForBoot = true;
|
||||
fileSystems."/persist".neededForBoot = true;
|
||||
boot.loader.grub.devices = [
|
||||
"/dev/disk/by-id/${config.secrets.secrets.local.disko.drive}"
|
||||
];
|
||||
boot.loader.grub.devices = [ "/dev/disk/by-id/${config.secrets.secrets.local.disko.drive}" ];
|
||||
}
|
||||
|
|
|
@ -1,36 +1,37 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
networking.hostId = config.secrets.secrets.local.networking.hostId;
|
||||
networking.domain = config.secrets.secrets.global.domains.mail_public;
|
||||
|
||||
boot.initrd.systemd.network = {
|
||||
enable = true;
|
||||
networks = {inherit (config.systemd.network.networks) "lan01";};
|
||||
networks = {
|
||||
inherit (config.systemd.network.networks) "lan01";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.network.networks = {
|
||||
"lan01" = let
|
||||
icfg = config.secrets.secrets.local.networking.interfaces.lan01;
|
||||
in {
|
||||
address = [
|
||||
icfg.hostCidrv4
|
||||
(lib.net.cidr.hostCidr 1 icfg.hostCidrv6)
|
||||
];
|
||||
gateway = ["fe80::1"];
|
||||
routes = [
|
||||
{Destination = "172.31.1.1";}
|
||||
{
|
||||
Gateway = "172.31.1.1";
|
||||
GatewayOnLink = true;
|
||||
}
|
||||
];
|
||||
matchConfig.MACAddress = icfg.mac;
|
||||
networkConfig.IPv6PrivacyExtensions = "yes";
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
"lan01" =
|
||||
let
|
||||
icfg = config.secrets.secrets.local.networking.interfaces.lan01;
|
||||
in
|
||||
{
|
||||
address = [
|
||||
icfg.hostCidrv4
|
||||
(lib.net.cidr.hostCidr 1 icfg.hostCidrv6)
|
||||
];
|
||||
gateway = [ "fe80::1" ];
|
||||
routes = [
|
||||
{ Destination = "172.31.1.1"; }
|
||||
{
|
||||
Gateway = "172.31.1.1";
|
||||
GatewayOnLink = true;
|
||||
}
|
||||
];
|
||||
matchConfig.MACAddress = icfg.mac;
|
||||
networkConfig.IPv6PrivacyExtensions = "yes";
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
};
|
||||
age.secrets.cloudflare_token_acme = {
|
||||
rekeyFile = ./secrets/cloudflare_api_token.age;
|
||||
|
@ -43,25 +44,25 @@
|
|||
email = config.secrets.secrets.global.devEmail;
|
||||
dnsProvider = "cloudflare";
|
||||
dnsPropagationCheck = true;
|
||||
reloadServices = ["nginx"];
|
||||
reloadServices = [ "nginx" ];
|
||||
credentialFiles = {
|
||||
"CF_DNS_API_TOKEN_FILE" = config.age.secrets.cloudflare_token_acme.path;
|
||||
"CF_ZONE_API_TOKEN_FILE" = config.age.secrets.cloudflare_token_acme.path;
|
||||
};
|
||||
};
|
||||
};
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = ["lan01"];
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [ "lan01" ];
|
||||
security.acme.certs = {
|
||||
mail_public = {
|
||||
domain = config.secrets.secrets.global.domains.mail_public;
|
||||
extraDomainNames = ["*.${config.secrets.secrets.global.domains.mail_public}"];
|
||||
extraDomainNames = [ "*.${config.secrets.secrets.global.domains.mail_public}" ];
|
||||
};
|
||||
mail_private = {
|
||||
domain = config.secrets.secrets.global.domains.mail_private;
|
||||
extraDomainNames = ["*.${config.secrets.secrets.global.domains.mail_private}"];
|
||||
extraDomainNames = [ "*.${config.secrets.secrets.global.domains.mail_private}" ];
|
||||
};
|
||||
};
|
||||
users.groups.acme.members = ["maddy"];
|
||||
users.groups.acme.members = [ "maddy" ];
|
||||
environment.persistence."/state".directories = [
|
||||
{
|
||||
directory = "/var/lib/acme";
|
||||
|
|
|
@@ -1,8 +1,5 @@
|
|||
{ inputs, lib, ... }:
|
||||
{
|
||||
inputs,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
../../config/basic
|
||||
../../config/services/octoprint.nix
|
||||
|
|
|
@@ -1,9 +1,10 @@
|
|||
{lib, ...}: {
|
||||
{ lib, ... }:
|
||||
{
|
||||
fileSystems = lib.mkForce {
|
||||
"/" = {
|
||||
device = "/dev/disk/by-uuid/44444444-4444-4444-8888-888888888888";
|
||||
fsType = "ext4";
|
||||
};
|
||||
};
|
||||
environment.persistence = lib.mkForce {};
|
||||
environment.persistence = lib.mkForce { };
|
||||
}
|
||||
|
|
|
@@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
networking = {
|
||||
inherit (config.secrets.secrets.local.networking) hostId;
|
||||
wireless.iwd = {
|
||||
|
|
|
@@ -1,8 +1,5 @@
|
|||
{ inputs, lib, ... }:
|
||||
{
|
||||
inputs,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-intel
|
||||
# for some reason the cpu-intel module includes the gpu as well
|
||||
|
@@ -45,11 +42,12 @@
|
|||
layout = "de";
|
||||
};
|
||||
libinput = {
|
||||
touchpad = lib.mkForce {
|
||||
accelSpeed = "0.5";
|
||||
};
|
||||
touchpad = lib.mkForce { accelSpeed = "0.5"; };
|
||||
};
|
||||
};
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
nix.settings.system-features = ["kvm" "nixos-test"];
|
||||
nix.settings.system-features = [
|
||||
"kvm"
|
||||
"nixos-test"
|
||||
];
|
||||
}
|
||||
|
|
|
@@ -1,8 +1,5 @@
|
|||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
disko.devices = {
|
||||
disk = {
|
||||
m2-ssd = rec {
|
||||
|
@@ -11,15 +8,21 @@
|
|||
content = with lib.disko.gpt; {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
boot = (partEfi "1GiB") // {device = "${device}-part1";};
|
||||
swap = (partSwap "16GiB") // {device = "${device}-part2";};
|
||||
rpool = (partLuksZfs "rpool" "rpool" "100%") // {device = "${device}-part3";};
|
||||
boot = (partEfi "1GiB") // {
|
||||
device = "${device}-part1";
|
||||
};
|
||||
swap = (partSwap "16GiB") // {
|
||||
device = "${device}-part2";
|
||||
};
|
||||
rpool = (partLuksZfs "rpool" "rpool" "100%") // {
|
||||
device = "${device}-part3";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
zpool = with lib.disko.zfs; {
|
||||
rpool = mkZpool {datasets = impermanenceZfsDatasets;};
|
||||
rpool = mkZpool { datasets = impermanenceZfsDatasets; };
|
||||
};
|
||||
};
|
||||
fileSystems."/state".neededForBoot = true;
|
||||
|
|
|
@@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
age.secrets.eduroam = {
|
||||
rekeyFile = ./secrets/iwd/eduroam.8021x.age;
|
||||
path = "/var/lib/iwd/eduroam.8021x";
|
||||
|
@@ -10,7 +11,11 @@
|
|||
devoloog-sae20.rekeyFile = ./secrets/iwd/devoloog-sae20.age;
|
||||
};
|
||||
wireguard.samba-patrick.client.via = "elisabeth-samba";
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = ["lan01" "lan02" "wlan01"];
|
||||
networking.nftables.firewall.zones.untrusted.interfaces = [
|
||||
"lan01"
|
||||
"lan02"
|
||||
"wlan01"
|
||||
];
|
||||
networking = {
|
||||
inherit (config.secrets.secrets.local.networking) hostId;
|
||||
wireless.iwd = {
|
||||
|
@@ -36,7 +41,7 @@
|
|||
IPv6PrivacyExtensions = "yes";
|
||||
MulticastDNS = true;
|
||||
};
|
||||
dns = ["1.1.1.1"];
|
||||
dns = [ "1.1.1.1" ];
|
||||
dhcpV4Config.RouteMetric = 10;
|
||||
dhcpV6Config.RouteMetric = 10;
|
||||
};
|
||||
|
@@ -47,7 +52,7 @@
|
|||
IPv6PrivacyExtensions = "yes";
|
||||
MulticastDNS = true;
|
||||
};
|
||||
dns = ["1.1.1.1"];
|
||||
dns = [ "1.1.1.1" ];
|
||||
dhcpV4Config.RouteMetric = 10;
|
||||
dhcpV6Config.RouteMetric = 10;
|
||||
};
|
||||
|
@@ -58,7 +63,7 @@
|
|||
IPv6PrivacyExtensions = "yes";
|
||||
MulticastDNS = true;
|
||||
};
|
||||
dns = ["1.1.1.1"];
|
||||
dns = [ "1.1.1.1" ];
|
||||
dhcpV4Config.RouteMetric = 40;
|
||||
dhcpV6Config.RouteMetric = 40;
|
||||
};
|
||||
|
|
|
@@ -1,3 +1 @@
|
|||
inputs: [
|
||||
(import ./misc.nix inputs)
|
||||
]
|
||||
inputs: [ (import ./misc.nix inputs) ]
|
||||
|
|
16
lib/misc.nix
|
@@ -1,11 +1,9 @@
|
|||
_inputs: _self: super: let
|
||||
_inputs: _self: super:
|
||||
let
|
||||
writeText = text: (super.writeText (builtins.hashString "sha256" "${text}") "${text}");
|
||||
in {
|
||||
lib =
|
||||
super.lib
|
||||
// {
|
||||
inherit
|
||||
writeText
|
||||
;
|
||||
};
|
||||
in
|
||||
{
|
||||
lib = super.lib // {
|
||||
inherit writeText;
|
||||
};
|
||||
}
|
||||
|
|
|
@@ -1,10 +1,6 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
{ lib, config, ... }:
|
||||
let
|
||||
inherit (lib)
|
||||
mkEnableOption
|
||||
mkMerge
|
||||
attrNames
|
||||
|
@@ -18,7 +14,8 @@
|
|||
mapAttrs'
|
||||
listToAttrs
|
||||
;
|
||||
in {
|
||||
in
|
||||
{
|
||||
home-manager.sharedModules = [
|
||||
{
|
||||
options.images = {
|
||||
|
@@ -39,26 +36,25 @@ in {
|
|||
|
||||
imports = [
|
||||
(
|
||||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
age.secrets = mkMerge (
|
||||
flip map
|
||||
(attrNames config.home-manager.users)
|
||||
(
|
||||
flip map (attrNames config.home-manager.users) (
|
||||
user:
|
||||
mkIf config.home-manager.users.${user}.images.enable (
|
||||
listToAttrs (flip map (attrNames (filterAttrs (_: type: type == "regular") (builtins.readDir ../secrets/img)))
|
||||
(
|
||||
file: {
|
||||
name = "images-${user}-${file}";
|
||||
value = {
|
||||
name = removeSuffix ".age" file;
|
||||
rekeyFile = ../secrets/img/${file};
|
||||
owner = user;
|
||||
group = user;
|
||||
};
|
||||
}
|
||||
))
|
||||
mkIf config.home-manager.users.${user}.images.enable (
|
||||
listToAttrs (
|
||||
flip map (attrNames (filterAttrs (_: type: type == "regular") (builtins.readDir ../secrets/img)))
|
||||
(file: {
|
||||
name = "images-${user}-${file}";
|
||||
value = {
|
||||
name = removeSuffix ".age" file;
|
||||
rekeyFile = ../secrets/img/${file};
|
||||
owner = user;
|
||||
group = user;
|
||||
};
|
||||
})
|
||||
)
|
||||
)
|
||||
)
|
||||
);
|
||||
}
|
||||
|
|
|
@@ -1,10 +1,6 @@
|
|||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
{ config, lib, ... }:
|
||||
let
|
||||
inherit (lib)
|
||||
flip
|
||||
mapAttrs
|
||||
attrNames
|
||||
|
@@ -13,7 +9,8 @@
|
|||
mkMerge
|
||||
isAttrs
|
||||
;
|
||||
in {
|
||||
in
|
||||
{
|
||||
# Expose a home manager module for each user that allows extending
|
||||
# environment.persistence.${sourceDir}.users.${userName} simply by
|
||||
# specifying home.persistence.${sourceDir} in home manager.
|
||||
|
@@ -21,74 +18,63 @@ in {
|
|||
{
|
||||
options.home.persistence = mkOption {
|
||||
description = "Additional persistence config for the given source path";
|
||||
default = {};
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
files = mkOption {
|
||||
description = "Additional files to persist via NixOS impermanence.";
|
||||
type = types.listOf (types.either types.attrs types.str);
|
||||
default = [];
|
||||
};
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.submodule {
|
||||
options = {
|
||||
files = mkOption {
|
||||
description = "Additional files to persist via NixOS impermanence.";
|
||||
type = types.listOf (types.either types.attrs types.str);
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
directories = mkOption {
|
||||
description = "Additional directories to persist via NixOS impermanence.";
|
||||
type = types.listOf (types.either types.attrs types.str);
|
||||
default = [];
|
||||
directories = mkOption {
|
||||
description = "Additional directories to persist via NixOS impermanence.";
|
||||
type = types.listOf (types.either types.attrs types.str);
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
||||
);
|
||||
};
|
||||
}
|
||||
];
|
||||
|
||||
# For each user that has a home-manager config, merge the locally defined
|
||||
# persistence options that we defined above.
|
||||
imports = let
|
||||
mkUserFiles = map (x:
|
||||
{parentDirectory.mode = "700";}
|
||||
// (
|
||||
if isAttrs x
|
||||
then x
|
||||
else {file = x;}
|
||||
));
|
||||
mkUserDirs = map (x:
|
||||
{mode = "700";}
|
||||
// (
|
||||
if isAttrs x
|
||||
then x
|
||||
else {directory = x;}
|
||||
));
|
||||
in [
|
||||
{
|
||||
environment.persistence = mkMerge (
|
||||
flip map
|
||||
(attrNames config.home-manager.users)
|
||||
(
|
||||
user: let
|
||||
hmUserCfg = config.home-manager.users.${user};
|
||||
in
|
||||
flip mapAttrs hmUserCfg.home.persistence
|
||||
(_: sourceCfg: {
|
||||
users.${user} = {
|
||||
# This needs to be set for all users with a
# non-standard home (not /home/<userName>),
# because due to nixpkgs internals it
# can't be deduced from homeDirectory
# (there would be infinite recursion).
# If this setting is forgotten, there
# are assertions in place warning you.
|
||||
home =
|
||||
{
|
||||
root = "/root";
|
||||
}
|
||||
.${user}
|
||||
or "/home/${user}";
|
||||
files = mkUserFiles sourceCfg.files;
|
||||
directories = mkUserDirs sourceCfg.directories;
|
||||
};
|
||||
})
|
||||
)
|
||||
imports =
|
||||
let
|
||||
mkUserFiles = map (
|
||||
x: { parentDirectory.mode = "700"; } // (if isAttrs x then x else { file = x; })
|
||||
);
|
||||
}
|
||||
];
|
||||
mkUserDirs = map (x: { mode = "700"; } // (if isAttrs x then x else { directory = x; }));
|
||||
in
|
||||
[
|
||||
{
|
||||
environment.persistence = mkMerge (
|
||||
flip map (attrNames config.home-manager.users) (
|
||||
user:
|
||||
let
|
||||
hmUserCfg = config.home-manager.users.${user};
|
||||
in
|
||||
flip mapAttrs hmUserCfg.home.persistence (
|
||||
_: sourceCfg: {
|
||||
users.${user} = {
|
||||
# This needs to be set for all users with a
# non-standard home (not /home/<userName>),
# because due to nixpkgs internals it
# can't be deduced from homeDirectory
# (there would be infinite recursion).
# If this setting is forgotten, there
# are assertions in place warning you.
|
||||
home = { root = "/root"; }.${user} or "/home/${user}";
|
||||
files = mkUserFiles sourceCfg.files;
|
||||
directories = mkUserDirs sourceCfg.directories;
|
||||
};
|
||||
}
|
||||
)
|
||||
)
|
||||
);
|
||||
}
|
||||
];
|
||||
}
|
||||
|
|
|
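For reference, the option defined above is consumed from a home-manager configuration roughly as in this sketch; the source path "/state" appears elsewhere in this repository, but the concrete file and directory entries are hypothetical:

{
  home.persistence."/state" = {
    # plain string entries are normalized by the module above
    # (files get parentDirectory.mode = "700", directories get mode = "700")
    files = [ ".config/example/settings.conf" ];
    directories = [ ".local/share/example" ];
  };
}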
@@ -1,58 +1,59 @@
|
|||
{
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
{ lib, pkgs, ... }:
|
||||
let
|
||||
inherit (lib)
|
||||
types
|
||||
mkEnableOption
|
||||
mkPackageOption
|
||||
mkOption
|
||||
mkIf
|
||||
;
|
||||
settingsFormat = pkgs.formats.json {};
|
||||
in {
|
||||
settingsFormat = pkgs.formats.json { };
|
||||
in
|
||||
{
|
||||
home-manager.sharedModules = [
|
||||
({config, ...}: let
|
||||
cfg = settingsFormat.generate "config.json" {
|
||||
streamdeck_ui_version = 2;
|
||||
state = config.programs.streamdeck-ui.settings;
|
||||
};
|
||||
preStart = pkgs.writeShellScript "streamdeck-setup-config" ''
|
||||
${pkgs.coreutils}/bin/cp "${cfg}" "$XDG_RUNTIME_DIR/streamdeck/config.json"
|
||||
'';
|
||||
in {
|
||||
options.programs.streamdeck-ui = {
|
||||
enable = mkEnableOption "streamdeck-ui";
|
||||
package = mkPackageOption pkgs "streamdeck-ui" {};
|
||||
settings = mkOption {
|
||||
default = {};
|
||||
type = types.submodule {freeformType = settingsFormat.type;};
|
||||
description = "Configuration per streamdeck";
|
||||
(
|
||||
{ config, ... }:
|
||||
let
|
||||
cfg = settingsFormat.generate "config.json" {
|
||||
streamdeck_ui_version = 2;
|
||||
state = config.programs.streamdeck-ui.settings;
|
||||
};
|
||||
};
|
||||
config = mkIf config.programs.streamdeck-ui.enable {
|
||||
systemd.user = {
|
||||
services = {
|
||||
streamdeck = {
|
||||
Unit = {
|
||||
Description = "start streamdeck-ui";
|
||||
# For some reason this depends on X or wayland running
|
||||
ConditionEnvironment = ["DISPLAY"];
|
||||
preStart = pkgs.writeShellScript "streamdeck-setup-config" ''
|
||||
${pkgs.coreutils}/bin/cp "${cfg}" "$XDG_RUNTIME_DIR/streamdeck/config.json"
|
||||
'';
|
||||
in
|
||||
{
|
||||
options.programs.streamdeck-ui = {
|
||||
enable = mkEnableOption "streamdeck-ui";
|
||||
package = mkPackageOption pkgs "streamdeck-ui" { };
|
||||
settings = mkOption {
|
||||
default = { };
|
||||
type = types.submodule { freeformType = settingsFormat.type; };
|
||||
description = "Configuration per streamdeck";
|
||||
};
|
||||
};
|
||||
config = mkIf config.programs.streamdeck-ui.enable {
|
||||
systemd.user = {
|
||||
services = {
|
||||
streamdeck = {
|
||||
Unit = {
|
||||
Description = "start streamdeck-ui";
|
||||
# For some reason this depends on X or wayland running
|
||||
ConditionEnvironment = [ "DISPLAY" ];
|
||||
};
|
||||
Service = {
|
||||
Type = "exec";
|
||||
ExecStart = "${pkgs.streamdeck-ui}/bin/streamdeck --no-ui";
|
||||
ExecStartPre = preStart;
|
||||
Environment = ''STREAMDECK_UI_CONFIG=%t/streamdeck/config.json'';
|
||||
RuntimeDirectory = "streamdeck";
|
||||
};
|
||||
Install.WantedBy = [ "graphical-session.target" ];
|
||||
};
|
||||
Service = {
|
||||
Type = "exec";
|
||||
ExecStart = "${pkgs.streamdeck-ui}/bin/streamdeck --no-ui";
|
||||
ExecStartPre = preStart;
|
||||
Environment = ''STREAMDECK_UI_CONFIG=%t/streamdeck/config.json'';
|
||||
RuntimeDirectory = "streamdeck";
|
||||
};
|
||||
Install.WantedBy = ["graphical-session.target"];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
|
|
|
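A rough usage sketch for the streamdeck-ui home-manager module above; only the option names come from the module, and the empty settings value is a placeholder since the concrete layout keys belong to streamdeck-ui's own config format:

{
  programs.streamdeck-ui = {
    enable = true;
    # freeform settings; serialized to JSON and copied to
    # $XDG_RUNTIME_DIR/streamdeck/config.json (under the "state" key)
    # before the user service starts
    settings = { };
  };
}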
@@ -3,9 +3,9 @@
|
|||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
types
|
||||
mkEnableOption
|
||||
mkPackageOption
|
||||
|
@@ -15,13 +15,14 @@
|
|||
cfg = config.services.actual;
|
||||
configFile = formatType.generate "config.json" cfg.settings;
|
||||
|
||||
formatType = pkgs.formats.json {};
|
||||
in {
|
||||
formatType = pkgs.formats.json { };
|
||||
in
|
||||
{
|
||||
options.services.actual = {
|
||||
enable = mkEnableOption "actual, a privacy focused app for managing your finances";
|
||||
package = mkPackageOption pkgs "actual" {};
|
||||
package = mkPackageOption pkgs "actual" { };
|
||||
settings = mkOption {
|
||||
default = {};
|
||||
default = { };
|
||||
type = types.submodule {
|
||||
freeformType = formatType.type;
|
||||
config = {
|
||||
|
@@ -33,7 +34,7 @@ in {
|
|||
};
|
||||
};
|
||||
config.systemd.services.actual = {
|
||||
after = ["network.target"];
|
||||
after = [ "network.target" ];
|
||||
environment.ACTUAL_CONFIG_PATH = configFile;
|
||||
serviceConfig = {
|
||||
ExecStartPre = "${pkgs.coreutils}/bin/ln -sf ${cfg.package}/migrations /var/lib/actual/";
|
||||
|
@@ -78,6 +79,6 @@ in {
|
|||
];
|
||||
UMask = "0077";
|
||||
};
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
}
|
||||
|
|
|
@@ -1,10 +1,6 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
{ lib, config, ... }:
|
||||
let
|
||||
inherit (lib)
|
||||
concatLists
|
||||
flip
|
||||
mapAttrsToList
|
||||
|
@@ -16,65 +12,86 @@
|
|||
;
|
||||
|
||||
cfg = config.users.deterministicIds;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options = {
|
||||
users.deterministicIds = mkOption {
|
||||
default = {};
|
||||
default = { };
|
||||
description = mdDoc ''
|
||||
Maps a user or group name to its expected uid/gid values. If a user/group is
|
||||
used on the system without specifying a uid/gid, this module will assign the
|
||||
corresponding ids defined here, or show an error if the definition is missing.
|
||||
'';
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
uid = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = mdDoc "The uid to assign if it is missing in `users.users.<name>`.";
|
||||
type = types.attrsOf (
|
||||
types.submodule {
|
||||
options = {
|
||||
uid = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = mdDoc "The uid to assign if it is missing in `users.users.<name>`.";
|
||||
};
|
||||
gid = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = mdDoc "The gid to assign if it is missing in `users.groups.<name>`.";
|
||||
};
|
||||
};
|
||||
gid = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = mdDoc "The gid to assign if it is missing in `users.groups.<name>`.";
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
users.users = mkOption {
|
||||
type = types.attrsOf (types.submodule ({name, ...}: {
|
||||
config.uid = let
|
||||
deterministicUid = cfg.${name}.uid or null;
|
||||
in
|
||||
mkIf (deterministicUid != null) (mkDefault deterministicUid);
|
||||
}));
|
||||
type = types.attrsOf (
|
||||
types.submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
config.uid =
|
||||
let
|
||||
deterministicUid = cfg.${name}.uid or null;
|
||||
in
|
||||
mkIf (deterministicUid != null) (mkDefault deterministicUid);
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
|
||||
users.groups = mkOption {
|
||||
type = types.attrsOf (types.submodule ({name, ...}: {
|
||||
config.gid = let
|
||||
deterministicGid = cfg.${name}.gid or null;
|
||||
in
|
||||
mkIf (deterministicGid != null) (mkDefault deterministicGid);
|
||||
}));
|
||||
type = types.attrsOf (
|
||||
types.submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
config.gid =
|
||||
let
|
||||
deterministicGid = cfg.${name}.gid or null;
|
||||
in
|
||||
mkIf (deterministicGid != null) (mkDefault deterministicGid);
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
assertions =
|
||||
concatLists (flip mapAttrsToList config.users.users (name: user: [
|
||||
{
|
||||
assertion = user.uid != null;
|
||||
message = "non-deterministic uid detected for '${name}', please assign one via `users.deterministicIds`";
|
||||
concatLists (
|
||||
flip mapAttrsToList config.users.users (
|
||||
name: user: [
|
||||
{
|
||||
assertion = user.uid != null;
|
||||
message = "non-deterministic uid detected for '${name}', please assign one via `users.deterministicIds`";
|
||||
}
|
||||
{
|
||||
assertion = !user.autoSubUidGidRange;
|
||||
message = "non-deterministic subUids/subGids detected for: ${name}";
|
||||
}
|
||||
]
|
||||
)
|
||||
)
|
||||
++ flip mapAttrsToList config.users.groups (
|
||||
name: group: {
|
||||
assertion = group.gid != null;
|
||||
message = "non-deterministic gid detected for '${name}', please assign one via `users.deterministicIds`";
|
||||
}
|
||||
{
|
||||
assertion = !user.autoSubUidGidRange;
|
||||
message = "non-deterministic subUids/subGids detected for: ${name}";
|
||||
}
|
||||
]))
|
||||
++ flip mapAttrsToList config.users.groups (name: group: {
|
||||
assertion = group.gid != null;
|
||||
message = "non-deterministic gid detected for '${name}', please assign one via `users.deterministicIds`";
|
||||
});
|
||||
);
|
||||
};
|
||||
}
|
||||
|
|
|
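A minimal usage sketch for the deterministicIds module above; the name and id values are hypothetical, only the option structure (users.deterministicIds.<name>.uid/gid) comes from the module:

{
  users.deterministicIds = {
    # every user/group that is created without an explicit uid/gid needs an
    # entry here, otherwise the assertions above trip
    example-service = {
      uid = 990;
      gid = 990;
    };
  };
}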
@@ -3,9 +3,9 @@
|
|||
lib,
|
||||
nodes,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
attrNames
|
||||
concatMap
|
||||
concatStringsSep
|
||||
|
@@ -21,16 +21,20 @@
|
|||
;
|
||||
|
||||
nodeName = config.node.name;
|
||||
mkForwardedOption = path:
|
||||
mkForwardedOption =
|
||||
path:
|
||||
mkOption {
|
||||
type = mkOptionType {
|
||||
name = "Same type that the receiving option `${concatStringsSep "." path}` normally accepts.";
|
||||
merge = _loc: defs:
|
||||
builtins.filter
|
||||
(x: builtins.isAttrs x -> ((x._type or "") != "__distributed_config_empty"))
|
||||
(map (x: x.value) defs);
|
||||
merge =
|
||||
_loc: defs:
|
||||
builtins.filter (x: builtins.isAttrs x -> ((x._type or "") != "__distributed_config_empty")) (
|
||||
map (x: x.value) defs
|
||||
);
|
||||
};
|
||||
default = {
|
||||
_type = "__distributed_config_empty";
|
||||
};
|
||||
default = {_type = "__distributed_config_empty";};
|
||||
description = ''
|
||||
Anything specified here will be forwarded to `${concatStringsSep "." path}`
|
||||
on the given node. Forwarding happens as-is to the raw values,
|
||||
|
@@ -39,28 +43,39 @@
|
|||
};
|
||||
|
||||
forwardedOptions = [
|
||||
["age" "secrets"]
|
||||
["services" "maddy" "ensureCredentials"]
|
||||
[
|
||||
"age"
|
||||
"secrets"
|
||||
]
|
||||
[
|
||||
"services"
|
||||
"maddy"
|
||||
"ensureCredentials"
|
||||
]
|
||||
];
|
||||
|
||||
attrsForEachOption = f: (foldl' (acc: path: recursiveUpdate acc (setAttrByPath path (f path))) {} forwardedOptions);
|
||||
in {
|
||||
attrsForEachOption =
|
||||
f: (foldl' (acc: path: recursiveUpdate acc (setAttrByPath path (f path))) { } forwardedOptions);
|
||||
in
|
||||
{
|
||||
options.nodes = mkOption {
|
||||
description = "Options forwarded to the given node.";
|
||||
default = {};
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = attrsForEachOption mkForwardedOption;
|
||||
});
|
||||
default = { };
|
||||
type = types.attrsOf (types.submodule { options = attrsForEachOption mkForwardedOption; });
|
||||
};
|
||||
|
||||
config = let
|
||||
mergeConfigFromOthers = let
|
||||
getConfig = path: otherNode: let
|
||||
cfg = nodes.${otherNode}.config.nodes.${nodeName} or null;
|
||||
in
|
||||
optionals (cfg != null) (getAttrFromPath path cfg);
|
||||
config =
|
||||
let
|
||||
mergeConfigFromOthers =
|
||||
let
|
||||
getConfig =
|
||||
path: otherNode:
|
||||
let
|
||||
cfg = nodes.${otherNode}.config.nodes.${nodeName} or null;
|
||||
in
|
||||
optionals (cfg != null) (getAttrFromPath path cfg);
|
||||
in
|
||||
path: mkMerge (concatMap (getConfig path) (attrNames nodes));
|
||||
in
|
||||
path: mkMerge (concatMap (getConfig path) (attrNames nodes));
|
||||
in
|
||||
attrsForEachOption mergeConfigFromOthers;
|
||||
}
|
||||
|
|
|
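To illustrate the forwarding above: any other host can write into the forwarded paths of a target node (age.secrets and services.maddy.ensureCredentials, as listed in forwardedOptions). The node name and secret below are hypothetical:

{
  # evaluated on some other host; merged into age.secrets on the node "mail01"
  nodes.mail01.age.secrets.example-token = {
    rekeyFile = ./secrets/example-token.age;
  };
}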
@@ -3,20 +3,21 @@
|
|||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.services.homebox;
|
||||
inherit
|
||||
(lib)
|
||||
inherit (lib)
|
||||
mkEnableOption
|
||||
mkPackageOption
|
||||
mkDefault
|
||||
types
|
||||
mkIf
|
||||
;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.homebox = {
|
||||
enable = mkEnableOption "homebox";
|
||||
package = mkPackageOption pkgs "homebox" {};
|
||||
package = mkPackageOption pkgs "homebox" { };
|
||||
settings = lib.mkOption {
|
||||
type = types.attrsOf types.str;
|
||||
defaultText = ''
|
||||
|
@@ -39,7 +40,7 @@ in {
|
|||
HBOX_MODE = mkDefault "production";
|
||||
};
|
||||
systemd.services.homebox = {
|
||||
after = ["network.target"];
|
||||
after = [ "network.target" ];
|
||||
environment = cfg.settings;
|
||||
serviceConfig = {
|
||||
User = "homebox";
|
||||
|
@@ -86,8 +87,8 @@ in {
|
|||
# System Call Filtering
|
||||
UMask = "0077";
|
||||
};
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
};
|
||||
meta.maintainers = with lib.maintainers; [patrickdag];
|
||||
meta.maintainers = with lib.maintainers; [ patrickdag ];
|
||||
}
|
||||
|
|
228
modules/iwd.nix
|
@@ -3,103 +3,141 @@
|
|||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.networking.wireless.iwd = let
|
||||
inherit
|
||||
(lib)
|
||||
mkOption
|
||||
literalExample
|
||||
types
|
||||
hasAttrByPath
|
||||
;
|
||||
in {
|
||||
networks = mkOption {
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{ "karlsruhe.freifunk.net" = {};
|
||||
};
|
||||
'';
|
||||
|
||||
description = ''
|
||||
Declarative configuration of wifi networks for
|
||||
<citerefentry><refentrytitle>iwd</refentrytitle><manvolnum>8</manvolnum></citerefentry>.
|
||||
|
||||
All networks will be stored in
|
||||
<literal>/var/lib/iwd/<name>.<type></literal>.
|
||||
|
||||
Since each network is stored in its own file, declarative networks can be used in an
|
||||
environment with imperatively added networks via
|
||||
<citerefentry><refentrytitle>iwctl</refentrytitle><manvolnum>1</manvolnum></citerefentry>.
|
||||
'';
|
||||
|
||||
type = types.attrsOf (types.submodule ({config, ...}: {
|
||||
config.kind =
|
||||
if (hasAttrByPath ["Security" "Passphrase"] config.settings)
|
||||
then "psk"
|
||||
else if !(hasAttrByPath ["Security"] config.settings)
|
||||
then "open"
|
||||
else "8021x";
|
||||
|
||||
options = {
|
||||
kind = mkOption {
|
||||
type = types.enum ["open" "psk" "8021x"];
|
||||
description = "The type of network. This will determine the file ending. The module will try to determine this automatically so this should only be set when the heuristics fail.";
|
||||
}:
|
||||
{
|
||||
options.networking.wireless.iwd =
|
||||
let
|
||||
inherit (lib)
|
||||
mkOption
|
||||
literalExample
|
||||
types
|
||||
hasAttrByPath
|
||||
;
|
||||
in
|
||||
{
|
||||
networks = mkOption {
|
||||
default = { };
|
||||
example = literalExample ''
|
||||
{ "karlsruhe.freifunk.net" = {};
|
||||
};
|
||||
settings = mkOption {
|
||||
type = with types; (attrsOf (attrsOf (oneOf [str path])));
|
||||
description = ''
|
||||
Contents of the iwd config file for this network.
The lowest-level values should be files, which will be read into the config file.
|
||||
'';
|
||||
default = {};
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
};
|
||||
|
||||
config = let
|
||||
inherit
|
||||
(lib)
|
||||
mkIf
|
||||
flip
|
||||
mapAttrsToList
|
||||
concatStringsSep
|
||||
;
|
||||
cfg = config.networking.wireless.iwd;
|
||||
|
||||
encoder = pkgs.writeScriptBin "encoder" ''
|
||||
#! ${pkgs.runtimeShell} -e
|
||||
|
||||
# Extract file-ext from network names
|
||||
ext="$(sed -re 's/.*\.(8021x|open|psk)$/\1/' <<< "$*")"
|
||||
to_enc="$(sed -re "s/(.*)\.$ext/\1/g" <<< "$*")"
|
||||
|
||||
# Encode ssid (excluding file-extension) as hex if needed
|
||||
[[ "$to_enc" =~ ^[[:alnum:]]+$ ]] && { echo "$to_enc.$ext"; exit 0; }
|
||||
echo "=$(printf "$to_enc" | ${pkgs.unixtools.xxd}/bin/xxd -pu).$ext"
|
||||
'';
|
||||
in
|
||||
mkIf cfg.enable {
|
||||
systemd.services.iwd = mkIf (cfg.networks != {}) {
|
||||
path = [encoder];
|
||||
preStart = let
|
||||
dataDir = "/var/lib/iwd";
|
||||
in ''
|
||||
# Create config files for declaratively defined networks in the NixOS config.
|
||||
${concatStringsSep "\n" (flip mapAttrsToList cfg.networks (network: config: ''
|
||||
filename=${dataDir}/"$(encoder '${network}.${config.kind}')"
|
||||
touch "$filename"
|
||||
cat >$filename <<EOF
|
||||
${concatStringsSep "\n" (flip mapAttrsToList config.settings (toplevel: config: ''
|
||||
[${toplevel}]
|
||||
${concatStringsSep "\n" (flip mapAttrsToList config (name: value: ''
|
||||
${name}=$(<${value})
|
||||
''))}
|
||||
''))}
|
||||
EOF
|
||||
''))}
|
||||
'';
|
||||
|
||||
description = ''
|
||||
Declarative configuration of wifi networks for
|
||||
<citerefentry><refentrytitle>iwd</refentrytitle><manvolnum>8</manvolnum></citerefentry>.
|
||||
|
||||
All networks will be stored in
|
||||
<literal>/var/lib/iwd/<name>.<type></literal>.
|
||||
|
||||
Since each network is stored in its own file, declarative networks can be used in an
|
||||
environment with imperatively added networks via
|
||||
<citerefentry><refentrytitle>iwctl</refentrytitle><manvolnum>1</manvolnum></citerefentry>.
|
||||
'';
|
||||
|
||||
type = types.attrsOf (
|
||||
types.submodule (
|
||||
{ config, ... }:
|
||||
{
|
||||
config.kind =
|
||||
if
|
||||
(hasAttrByPath [
|
||||
"Security"
|
||||
"Passphrase"
|
||||
] config.settings)
|
||||
then
|
||||
"psk"
|
||||
else if !(hasAttrByPath [ "Security" ] config.settings) then
|
||||
"open"
|
||||
else
|
||||
"8021x";
|
||||
|
||||
options = {
|
||||
kind = mkOption {
|
||||
type = types.enum [
|
||||
"open"
|
||||
"psk"
|
||||
"8021x"
|
||||
];
|
||||
description = "The type of network. This will determine the file ending. The module will try to determine this automatically so this should only be set when the heuristics fail.";
|
||||
};
|
||||
settings = mkOption {
|
||||
type =
|
||||
with types;
|
||||
(attrsOf (
|
||||
attrsOf (oneOf [
|
||||
str
|
||||
path
|
||||
])
|
||||
));
|
||||
description = ''
|
||||
Contents of the iwd config file for this network.
The lowest-level values should be files, which will be read into the config file.
|
||||
'';
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
config =
|
||||
let
|
||||
inherit (lib)
|
||||
mkIf
|
||||
flip
|
||||
mapAttrsToList
|
||||
concatStringsSep
|
||||
;
|
||||
cfg = config.networking.wireless.iwd;
|
||||
|
||||
encoder = pkgs.writeScriptBin "encoder" ''
|
||||
#! ${pkgs.runtimeShell} -e
|
||||
|
||||
# Extract file-ext from network names
|
||||
ext="$(sed -re 's/.*\.(8021x|open|psk)$/\1/' <<< "$*")"
|
||||
to_enc="$(sed -re "s/(.*)\.$ext/\1/g" <<< "$*")"
|
||||
|
||||
# Encode ssid (excluding file-extension) as hex if needed
|
||||
[[ "$to_enc" =~ ^[[:alnum:]]+$ ]] && { echo "$to_enc.$ext"; exit 0; }
|
||||
echo "=$(printf "$to_enc" | ${pkgs.unixtools.xxd}/bin/xxd -pu).$ext"
|
||||
'';
|
||||
in
|
||||
mkIf cfg.enable {
|
||||
systemd.services.iwd = mkIf (cfg.networks != { }) {
|
||||
path = [ encoder ];
|
||||
preStart =
|
||||
let
|
||||
dataDir = "/var/lib/iwd";
|
||||
in
|
||||
''
|
||||
# Create config files for declaratively defined networks in the NixOS config.
|
||||
${concatStringsSep "\n" (
|
||||
flip mapAttrsToList cfg.networks (
|
||||
network: config: ''
|
||||
filename=${dataDir}/"$(encoder '${network}.${config.kind}')"
|
||||
touch "$filename"
|
||||
cat >$filename <<EOF
|
||||
${concatStringsSep "\n" (
|
||||
flip mapAttrsToList config.settings (
|
||||
toplevel: config: ''
|
||||
[${toplevel}]
|
||||
${concatStringsSep "\n" (
|
||||
flip mapAttrsToList config (
|
||||
name: value: ''
|
||||
${name}=$(<${value})
|
||||
''
|
||||
)
|
||||
)}
|
||||
''
|
||||
)
|
||||
)}
|
||||
EOF
|
||||
''
|
||||
)
|
||||
)}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
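A short sketch of the networks option from the iwd module above; the SSID and secret are hypothetical, but the shape follows the option: settings is an attrset of config sections whose leaf values are files that get inlined into /var/lib/iwd/<name>.<kind> by the preStart script:

{ config, ... }:
{
  networking.wireless.iwd.networks."example-wifi" = {
    # a Security.Passphrase entry makes the module detect kind = "psk"
    settings.Security.Passphrase = config.age.secrets.example-wifi-psk.path;
  };
}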
@@ -4,9 +4,9 @@
|
|||
options,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
any
|
||||
attrNames
|
||||
attrValues
|
||||
|
@@ -40,25 +40,32 @@
|
|||
;
|
||||
|
||||
cfg = config.services.kanidm;
|
||||
settingsFormat = pkgs.formats.toml {};
|
||||
settingsFormat = pkgs.formats.toml { };
|
||||
# Remove null values, so we can document optional values that don't end up in the generated TOML file.
|
||||
filterConfig = converge (filterAttrsRecursive (_: v: v != null));
|
||||
serverConfigFile = settingsFormat.generate "server.toml" (filterConfig cfg.serverSettings);
|
||||
clientConfigFile = settingsFormat.generate "kanidm-config.toml" (filterConfig cfg.clientSettings);
|
||||
unixConfigFile = settingsFormat.generate "kanidm-unixd.toml" (filterConfig cfg.unixSettings);
|
||||
certPaths = builtins.map builtins.dirOf [cfg.serverSettings.tls_chain cfg.serverSettings.tls_key];
|
||||
certPaths = builtins.map builtins.dirOf [
|
||||
cfg.serverSettings.tls_chain
|
||||
cfg.serverSettings.tls_key
|
||||
];
|
||||
|
||||
# Merge bind mount paths and remove paths where a prefix is already mounted.
|
||||
# This makes sure that if e.g. the tls_chain is in the nix store and /nix/store is already in the mount
|
||||
# paths, no new bind mount is added. Adding subpaths caused problems on ofborg.
|
||||
hasPrefixInList = list: newPath: any (path: hasPrefix (builtins.toString path) (builtins.toString newPath)) list;
|
||||
mergePaths = foldl' (merged: newPath: let
|
||||
# If the new path is a prefix to some existing path, we need to filter it out
|
||||
filteredPaths = filter (p: !hasPrefix (builtins.toString newPath) (builtins.toString p)) merged;
|
||||
# If a prefix of the new path is already in the list, do not add it
|
||||
filteredNew = optional (!hasPrefixInList filteredPaths newPath) newPath;
|
||||
in
|
||||
filteredPaths ++ filteredNew) [];
|
||||
hasPrefixInList =
|
||||
list: newPath: any (path: hasPrefix (builtins.toString path) (builtins.toString newPath)) list;
|
||||
mergePaths = foldl' (
|
||||
merged: newPath:
|
||||
let
|
||||
# If the new path is a prefix to some existing path, we need to filter it out
|
||||
filteredPaths = filter (p: !hasPrefix (builtins.toString newPath) (builtins.toString p)) merged;
|
||||
# If a prefix of the new path is already in the list, do not add it
|
||||
filteredNew = optional (!hasPrefixInList filteredPaths newPath) newPath;
|
||||
in
|
||||
filteredPaths ++ filteredNew
|
||||
) [ ];
|
||||
|
||||
defaultServiceConfig = {
|
||||
BindReadOnlyPaths = [
|
||||
|
@@ -68,7 +75,7 @@
|
|||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
];
|
||||
CapabilityBoundingSet = [];
|
||||
CapabilityBoundingSet = [ ];
|
||||
# ProtectClock= adds DeviceAllow=char-rtc r
|
||||
DeviceAllow = "";
|
||||
# Implies ProtectSystem=strict, which re-mounts all paths
|
||||
|
@@ -92,17 +99,21 @@
|
|||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectProc = "invisible";
|
||||
RestrictAddressFamilies = [];
|
||||
RestrictAddressFamilies = [ ];
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
SystemCallArchitectures = "native";
|
||||
SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"];
|
||||
SystemCallFilter = [
|
||||
"@system-service"
|
||||
"~@privileged @resources @setuid @keyring"
|
||||
];
|
||||
# Does not work well with the temporary root
|
||||
#UMask = "0066";
|
||||
};
|
||||
|
||||
mkPresentOption = what:
|
||||
mkPresentOption =
|
||||
what:
|
||||
mkOption {
|
||||
description = mdDoc "Whether to ensure that this ${what} is present or absent.";
|
||||
type = types.bool;
|
||||
|
@@ -111,20 +122,21 @@
|
|||
|
||||
filterPresent = filterAttrs (_: v: v.present);
|
||||
|
||||
provisionStateJson = pkgs.writeText "provision-state.json" (builtins.toJSON {
|
||||
inherit (cfg.provision) groups persons systems;
|
||||
});
|
||||
provisionStateJson = pkgs.writeText "provision-state.json" (
|
||||
builtins.toJSON { inherit (cfg.provision) groups persons systems; }
|
||||
);
|
||||
|
||||
serverPort =
|
||||
# ipv6:
|
||||
if hasInfix "]:" cfg.serverSettings.bindaddress
|
||||
then last (splitString "]:" cfg.serverSettings.bindaddress)
|
||||
if hasInfix "]:" cfg.serverSettings.bindaddress then
|
||||
last (splitString "]:" cfg.serverSettings.bindaddress)
|
||||
else
|
||||
# ipv4:
|
||||
if hasInfix "." cfg.serverSettings.bindaddress
|
||||
then last (splitString ":" cfg.serverSettings.bindaddress)
|
||||
# default is 8443
|
||||
else "8443";
|
||||
# ipv4:
|
||||
if hasInfix "." cfg.serverSettings.bindaddress then
|
||||
last (splitString ":" cfg.serverSettings.bindaddress)
|
||||
# default is 8443
|
||||
else
|
||||
"8443";
|
||||
|
||||
# Only recover the admin account if a password should explicitly be provisioned
|
||||
# for the account. Otherwise it is not needed for provisioning.
|
||||
|
@@ -141,28 +153,29 @@
|
|||
# for the account we set it, otherwise we generate a new one because it is required
|
||||
# for provisioning.
|
||||
recoverIdmAdmin =
|
||||
if cfg.provision.idmAdminPasswordFile != null
|
||||
then ''
|
||||
KANIDM_IDM_ADMIN_PASSWORD=$(< ${cfg.provision.idmAdminPasswordFile})
|
||||
# We always reset the idm_admin account password if a desired password was specified.
|
||||
if ! KANIDM_RECOVER_ACCOUNT_PASSWORD=$KANIDM_IDM_ADMIN_PASSWORD ${cfg.package}/bin/kanidmd recover-account -c ${serverConfigFile} idm_admin --from-environment >/dev/null; then
|
||||
echo "Failed to recover idm_admin account" >&2
|
||||
exit 1
|
||||
fi
|
||||
''
|
||||
else ''
|
||||
# Recover idm_admin account
|
||||
if ! recover_out=$(${cfg.package}/bin/kanidmd recover-account -c ${serverConfigFile} idm_admin -o json); then
|
||||
echo "$recover_out" >&2
|
||||
echo "kanidm provision: Failed to recover admin account" >&2
|
||||
exit 1
|
||||
fi
|
||||
if ! KANIDM_IDM_ADMIN_PASSWORD=$(grep '{"password' <<< "$recover_out" | ${getExe pkgs.jq} -r .password); then
|
||||
echo "$recover_out" >&2
|
||||
echo "kanidm provision: Failed to parse password for idm_admin account" >&2
|
||||
exit 1
|
||||
fi
|
||||
'';
|
||||
if cfg.provision.idmAdminPasswordFile != null then
|
||||
''
|
||||
KANIDM_IDM_ADMIN_PASSWORD=$(< ${cfg.provision.idmAdminPasswordFile})
|
||||
# We always reset the idm_admin account password if a desired password was specified.
|
||||
if ! KANIDM_RECOVER_ACCOUNT_PASSWORD=$KANIDM_IDM_ADMIN_PASSWORD ${cfg.package}/bin/kanidmd recover-account -c ${serverConfigFile} idm_admin --from-environment >/dev/null; then
|
||||
echo "Failed to recover idm_admin account" >&2
|
||||
exit 1
|
||||
fi
|
||||
''
|
||||
else
|
||||
''
|
||||
# Recover idm_admin account
|
||||
if ! recover_out=$(${cfg.package}/bin/kanidmd recover-account -c ${serverConfigFile} idm_admin -o json); then
|
||||
echo "$recover_out" >&2
|
||||
echo "kanidm provision: Failed to recover admin account" >&2
|
||||
exit 1
|
||||
fi
|
||||
if ! KANIDM_IDM_ADMIN_PASSWORD=$(grep '{"password' <<< "$recover_out" | ${getExe pkgs.jq} -r .password); then
|
||||
echo "$recover_out" >&2
|
||||
echo "kanidm provision: Failed to parse password for idm_admin account" >&2
|
||||
exit 1
|
||||
fi
|
||||
'';
|
||||
|
||||
postStartScript = pkgs.writeShellScript "post-start" ''
|
||||
set -euo pipefail
|
||||
|
@@ -191,13 +204,14 @@
|
|||
KANIDM_PROVISION_IDM_ADMIN_TOKEN=$KANIDM_IDM_ADMIN_PASSWORD \
|
||||
${getExe pkgs.kanidm-provision} --url "${cfg.provision.instanceUrl}" --state ${provisionStateJson} ${optionalString cfg.provision.acceptInvalidCerts "--accept-invalid-certs"}
|
||||
'';
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.kanidm = {
|
||||
enableClient = mkEnableOption (mdDoc "the Kanidm client");
|
||||
enableServer = mkEnableOption (mdDoc "the Kanidm server");
|
||||
enablePam = mkEnableOption (mdDoc "the Kanidm PAM and NSS integration");
|
||||
|
||||
package = mkPackageOption pkgs "kanidm" {};
|
||||
package = mkPackageOption pkgs "kanidm" { };
|
||||
|
||||
serverSettings = mkOption {
|
||||
type = types.submodule {
|
||||
|
@@ -253,12 +267,20 @@ in {
|
|||
log_level = mkOption {
|
||||
description = mdDoc "Log level of the server.";
|
||||
default = "info";
|
||||
type = types.enum ["info" "debug" "trace"];
|
||||
type = types.enum [
|
||||
"info"
|
||||
"debug"
|
||||
"trace"
|
||||
];
|
||||
};
|
||||
role = mkOption {
|
||||
description = mdDoc "The role of this server. This affects the replication relationship and thereby available features.";
|
||||
default = "WriteReplica";
|
||||
type = types.enum ["WriteReplica" "WriteReplicaNoUI" "ReadOnlyReplica"];
|
||||
type = types.enum [
|
||||
"WriteReplica"
|
||||
"WriteReplicaNoUI"
|
||||
"ReadOnlyReplica"
|
||||
];
|
||||
};
|
||||
online_backup = {
|
||||
path = mkOption {
|
||||
|
@@ -284,7 +306,7 @@ in {
|
|||
};
|
||||
};
|
||||
};
|
||||
default = {};
|
||||
default = { };
|
||||
description = mdDoc ''
|
||||
Settings for Kanidm, see
|
||||
[the documentation](https://kanidm.github.io/kanidm/stable/server_configuration.html)
|
||||
|
@@ -387,227 +409,245 @@ in {
|
|||
|
||||
groups = mkOption {
|
||||
description = "Provisioning of kanidm groups";
|
||||
default = {};
|
||||
type = types.attrsOf (types.submodule (groupSubmod: {
|
||||
options = {
|
||||
present = mkPresentOption "group";
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.submodule (groupSubmod: {
|
||||
options = {
|
||||
present = mkPresentOption "group";
|
||||
|
||||
members = mkOption {
|
||||
description = "List of kanidm entities (persons, groups, ...) which are part of this group.";
|
||||
type = types.listOf types.str;
|
||||
apply = unique;
|
||||
default = [];
|
||||
members = mkOption {
|
||||
description = "List of kanidm entities (persons, groups, ...) which are part of this group.";
|
||||
type = types.listOf types.str;
|
||||
apply = unique;
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
config.members = concatLists (flip mapAttrsToList cfg.provision.persons (
|
||||
person: personCfg:
|
||||
optional (personCfg.present && builtins.elem groupSubmod.config._module.args.name personCfg.groups) person
|
||||
));
|
||||
}));
|
||||
config.members = concatLists (
|
||||
flip mapAttrsToList cfg.provision.persons (
|
||||
person: personCfg:
|
||||
optional (
|
||||
personCfg.present && builtins.elem groupSubmod.config._module.args.name personCfg.groups
|
||||
) person
|
||||
)
|
||||
);
|
||||
})
|
||||
);
|
||||
};
|
||||
|
||||
persons = mkOption {
|
||||
description = "Provisioning of kanidm persons";
|
||||
default = {};
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
present = mkPresentOption "person";
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.submodule {
|
||||
options = {
|
||||
present = mkPresentOption "person";
|
||||
|
||||
displayName = mkOption {
|
||||
description = "Display name";
|
||||
type = types.str;
|
||||
example = "My User";
|
||||
};
|
||||
displayName = mkOption {
|
||||
description = "Display name";
|
||||
type = types.str;
|
||||
example = "My User";
|
||||
};
|
||||
|
||||
legalName = mkOption {
|
||||
description = "Full legal name";
|
||||
type = types.nullOr types.str;
|
||||
example = "Jane Doe";
|
||||
default = null;
|
||||
};
|
||||
legalName = mkOption {
|
||||
description = "Full legal name";
|
||||
type = types.nullOr types.str;
|
||||
example = "Jane Doe";
|
||||
default = null;
|
||||
};
|
||||
|
||||
mailAddresses = mkOption {
|
||||
description = "Mail addresses. First given address is considered the primary address.";
|
||||
type = types.listOf types.str;
|
||||
example = ["jane.doe@example.com"];
|
||||
default = [];
|
||||
};
|
||||
mailAddresses = mkOption {
|
||||
description = "Mail addresses. First given address is considered the primary address.";
|
||||
type = types.listOf types.str;
|
||||
example = [ "jane.doe@example.com" ];
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
groups = mkOption {
|
||||
description = "List of groups this person should belong to.";
|
||||
type = types.listOf types.str;
|
||||
apply = unique;
|
||||
default = [];
|
||||
groups = mkOption {
|
||||
description = "List of groups this person should belong to.";
|
||||
type = types.listOf types.str;
|
||||
apply = unique;
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
systems.oauth2 = mkOption {
|
||||
description = "Provisioning of oauth2 resource servers";
|
||||
default = {};
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
present = mkPresentOption "oauth2 resource server";
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.submodule {
|
||||
options = {
|
||||
present = mkPresentOption "oauth2 resource server";
|
||||
|
||||
public = mkOption {
|
||||
description = "Whether this is a public client (enforces PKCE, doesn't use a basic secret)";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
public = mkOption {
|
||||
description = "Whether this is a public client (enforces PKCE, doesn't use a basic secret)";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
displayName = mkOption {
|
||||
description = "Display name";
|
||||
type = types.str;
|
||||
example = "Some Service";
|
||||
};
|
||||
|
||||
originUrl = mkOption {
|
||||
description = "The origin URL of the service. OAuth2 redirects will only be allowed to sites under this origin. Must end with a slash.";
|
||||
type = types.strMatching ".*://.*/$";
|
||||
example = "https://someservice.example.com/";
|
||||
};
|
||||
|
||||
originLanding = mkOption {
|
||||
description = "When redirecting from the Kanidm Apps Listing page, some linked applications may need to land on a specific page to trigger oauth2/oidc interactions.";
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "https://someservice.example.com/home";
|
||||
};
|
||||
|
||||
basicSecretFile = mkOption {
|
||||
description = ''
|
||||
The basic secret to use for this service. If null, the random secret generated
|
||||
by kanidm will not be touched. Do NOT use a path from the nix store here!
|
||||
'';
|
||||
type = types.nullOr types.path;
|
||||
example = "/run/secrets/some-oauth2-basic-secret";
|
||||
default = null;
|
||||
};
|
||||
|
||||
enableLocalhostRedirects = mkOption {
|
||||
description = "Allow localhost redirects. Only for public clients.";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
enableLegacyCrypto = mkOption {
|
||||
description = "Enable legacy crypto on this client. Allows JWT signing algorithms like RS256.";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
allowInsecureClientDisablePkce = mkOption {
|
||||
description = ''
|
||||
Disable PKCE on this oauth2 resource server to work around insecure clients
|
||||
that may not support it. You should request the client to enable PKCE!
|
||||
Only for non-public clients.
|
||||
'';
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
preferShortUsername = mkOption {
|
||||
description = "Use 'name' instead of 'spn' in the preferred_username claim";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
scopeMaps = mkOption {
|
||||
description = ''
|
||||
Maps kanidm groups to returned oauth scopes.
|
||||
See [Scope Relations](https://kanidm.github.io/kanidm/stable/integrations/oauth2.html#scope-relationships) for more information.
|
||||
'';
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
default = { };
|
||||
};
|
||||
|
||||
supplementaryScopeMaps = mkOption {
|
||||
description = ''
|
||||
Maps kanidm groups to additionally returned oauth scopes.
|
||||
See [Scope Relations](https://kanidm.github.io/kanidm/stable/integrations/oauth2.html#scope-relationships) for more information.
|
||||
'';
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
default = { };
|
||||
};
|
||||
|
||||
removeOrphanedClaimMaps = mkOption {
|
||||
description = "Whether claim maps not specified here but present in kanidm should be removed from kanidm.";
|
||||
type = types.bool;
|
||||
default = true;
|
||||
};
|
||||
|
||||
claimMaps = mkOption {
|
||||
description = ''
|
||||
Adds additional claims (and values) based on which kanidm groups an authenticating party belongs to.
|
||||
See [Claim Maps](https://kanidm.github.io/kanidm/master/integrations/oauth2.html#custom-claim-maps) for more information.
|
||||
'';
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.submodule {
|
||||
options = {
|
||||
joinType = mkOption {
|
||||
description = ''
|
||||
Determines how multiple values are joined to create the claim value.
|
||||
See [Claim Maps](https://kanidm.github.io/kanidm/master/integrations/oauth2.html#custom-claim-maps) for more information.
|
||||
'';
|
||||
type = types.enum [
|
||||
"array"
|
||||
"csv"
|
||||
"ssv"
|
||||
];
|
||||
default = "array";
|
||||
};
|
||||
|
||||
valuesByGroup = mkOption {
|
||||
description = "Maps kanidm groups to values for the claim.";
|
||||
default = { };
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
displayName = mkOption {
|
||||
description = "Display name";
|
||||
type = types.str;
|
||||
example = "Some Service";
|
||||
};
|
||||
|
||||
originUrl = mkOption {
|
||||
description = "The origin URL of the service. OAuth2 redirects will only be allowed to sites under this origin. Must end with a slash.";
|
||||
type = types.strMatching ".*://.*/$";
|
||||
example = "https://someservice.example.com/";
|
||||
};
|
||||
|
||||
originLanding = mkOption {
|
||||
description = "When redirecting from the Kanidm Apps Listing page, some linked applications may need to land on a specific page to trigger oauth2/oidc interactions.";
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "https://someservice.example.com/home";
|
||||
};
|
||||
|
||||
basicSecretFile = mkOption {
|
||||
description = ''
|
||||
The basic secret to use for this service. If null, the random secret generated
|
||||
by kanidm will not be touched. Do NOT use a path from the nix store here!
|
||||
'';
|
||||
type = types.nullOr types.path;
|
||||
example = "/run/secrets/some-oauth2-basic-secret";
|
||||
default = null;
|
||||
};
|
||||
|
||||
enableLocalhostRedirects = mkOption {
|
||||
description = "Allow localhost redirects. Only for public clients.";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
enableLegacyCrypto = mkOption {
|
||||
description = "Enable legacy crypto on this client. Allows JWT signing algorithms like RS256.";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
allowInsecureClientDisablePkce = mkOption {
|
||||
description = ''
|
||||
Disable PKCE on this oauth2 resource server to work around insecure clients
|
||||
that may not support it. You should request the client to enable PKCE!
|
||||
Only for non-public clients.
|
||||
'';
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
preferShortUsername = mkOption {
|
||||
description = "Use 'name' instead of 'spn' in the preferred_username claim";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
scopeMaps = mkOption {
|
||||
description = ''
|
||||
Maps kanidm groups to returned oauth scopes.
|
||||
See [Scope Relations](https://kanidm.github.io/kanidm/stable/integrations/oauth2.html#scope-relationships) for more information.
|
||||
'';
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
default = {};
|
||||
};
|
||||
|
||||
supplementaryScopeMaps = mkOption {
|
||||
description = ''
|
||||
Maps kanidm groups to additionally returned oauth scopes.
|
||||
See [Scope Relations](https://kanidm.github.io/kanidm/stable/integrations/oauth2.html#scope-relationships) for more information.
|
||||
'';
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
default = {};
|
||||
};
|
||||
|
||||
removeOrphanedClaimMaps = mkOption {
|
||||
description = "Whether claim maps not specified here but present in kanidm should be removed from kanidm.";
|
||||
type = types.bool;
|
||||
default = true;
|
||||
};
|
||||
|
||||
claimMaps = mkOption {
|
||||
description = ''
|
||||
Adds additional claims (and values) based on which kanidm groups an authenticating party belongs to.
|
||||
See [Claim Maps](https://kanidm.github.io/kanidm/master/integrations/oauth2.html#custom-claim-maps) for more information.
|
||||
'';
|
||||
default = {};
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
joinType = mkOption {
|
||||
description = ''
|
||||
Determines how multiple values are joined to create the claim value.
|
||||
See [Claim Maps](https://kanidm.github.io/kanidm/master/integrations/oauth2.html#custom-claim-maps) for more information.
|
||||
'';
|
||||
type = types.enum ["array" "csv" "ssv"];
|
||||
default = "array";
|
||||
};
|
||||
|
||||
valuesByGroup = mkOption {
|
||||
description = "Maps kanidm groups to values for the claim.";
|
||||
default = {};
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
};
|
||||
};
|
||||
});
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
||||
);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf (cfg.enableClient || cfg.enableServer || cfg.enablePam) {
|
||||
assertions = let
|
||||
entityList = type: attrs: flip mapAttrsToList (filterPresent attrs) (name: _: {inherit type name;});
|
||||
entities =
|
||||
entityList "group" cfg.provision.groups
|
||||
++ entityList "person" cfg.provision.persons
|
||||
++ entityList "oauth2" cfg.provision.systems.oauth2;
|
||||
assertions =
|
||||
let
|
||||
entityList =
|
||||
type: attrs: flip mapAttrsToList (filterPresent attrs) (name: _: { inherit type name; });
|
||||
entities =
|
||||
entityList "group" cfg.provision.groups
|
||||
++ entityList "person" cfg.provision.persons
|
||||
++ entityList "oauth2" cfg.provision.systems.oauth2;
|
||||
|
||||
# Accumulate entities by name. Track corresponding entity types for later duplicate check.
|
||||
entitiesByName =
|
||||
foldl' (
|
||||
acc: {
|
||||
type,
|
||||
name,
|
||||
}:
|
||||
acc
|
||||
// {
|
||||
${name} = (acc.${name} or []) ++ [type];
|
||||
}
|
||||
) {}
|
||||
entities;
|
||||
# Accumulate entities by name. Track corresponding entity types for later duplicate check.
|
||||
entitiesByName = foldl' (
|
||||
acc: { type, name }: acc // { ${name} = (acc.${name} or [ ]) ++ [ type ]; }
|
||||
) { } entities;
|
||||
|
||||
assertGroupsKnown = opt: groups: let
|
||||
knownGroups = attrNames (filterPresent cfg.provision.groups);
|
||||
unknownGroups = subtractLists knownGroups groups;
|
||||
in {
|
||||
assertion = (cfg.enableServer && cfg.provision.enable) -> unknownGroups == [];
|
||||
message = "${opt} refers to unknown groups: ${toString unknownGroups}";
|
||||
};
|
||||
assertGroupsKnown =
|
||||
opt: groups:
|
||||
let
|
||||
knownGroups = attrNames (filterPresent cfg.provision.groups);
|
||||
unknownGroups = subtractLists knownGroups groups;
|
||||
in
|
||||
{
|
||||
assertion = (cfg.enableServer && cfg.provision.enable) -> unknownGroups == [ ];
|
||||
message = "${opt} refers to unknown groups: ${toString unknownGroups}";
|
||||
};
|
||||
|
||||
assertEntitiesKnown = opt: entities: let
|
||||
unknownEntities = subtractLists (attrNames entitiesByName) entities;
|
||||
in {
|
||||
assertion = (cfg.enableServer && cfg.provision.enable) -> unknownEntities == [];
|
||||
message = "${opt} refers to unknown entities: ${toString unknownEntities}";
|
||||
};
|
||||
in
|
||||
assertEntitiesKnown =
|
||||
opt: entities:
|
||||
let
|
||||
unknownEntities = subtractLists (attrNames entitiesByName) entities;
|
||||
in
|
||||
{
|
||||
assertion = (cfg.enableServer && cfg.provision.enable) -> unknownEntities == [ ];
|
||||
message = "${opt} refers to unknown entities: ${toString unknownEntities}";
|
||||
};
|
||||
in
|
||||
[
|
||||
{
|
||||
assertion = !cfg.enableServer || ((cfg.serverSettings.tls_chain or null) == null) || (!isStorePath cfg.serverSettings.tls_chain);
|
||||
assertion =
|
||||
!cfg.enableServer
|
||||
|| ((cfg.serverSettings.tls_chain or null) == null)
|
||||
|| (!isStorePath cfg.serverSettings.tls_chain);
|
||||
message = ''
|
||||
<option>services.kanidm.serverSettings.tls_chain</option> points to
|
||||
a file in the Nix store. You should use a quoted absolute path to
|
||||
|
@@ -615,7 +655,10 @@ in {
|
|||
'';
|
||||
}
|
||||
{
|
||||
assertion = !cfg.enableServer || ((cfg.serverSettings.tls_key or null) == null) || (!isStorePath cfg.serverSettings.tls_key);
|
||||
assertion =
|
||||
!cfg.enableServer
|
||||
|| ((cfg.serverSettings.tls_key or null) == null)
|
||||
|| (!isStorePath cfg.serverSettings.tls_key);
|
||||
message = ''
|
||||
<option>services.kanidm.serverSettings.tls_key</option> points to
|
||||
a file in the Nix store. You should use a quoted absolute path to
|
||||
|
@@ -639,9 +682,10 @@ in {
|
|||
{
|
||||
assertion =
|
||||
!cfg.enableServer
|
||||
|| (cfg.serverSettings.domain
|
||||
== null
|
||||
-> cfg.serverSettings.role == "WriteReplica" || cfg.serverSettings.role == "WriteReplicaNoUI");
|
||||
|| (
|
||||
cfg.serverSettings.domain == null
|
||||
-> cfg.serverSettings.role == "WriteReplica" || cfg.serverSettings.role == "WriteReplicaNoUI"
|
||||
);
|
||||
message = ''
|
||||
<option>services.kanidm.serverSettings.domain</option> can only be set if this instance
|
||||
is not a ReadOnlyReplica. Otherwise the db would inherit it from
|
||||
|
@@ -655,13 +699,14 @@ in {
|
|||
# If any secret is provisioned, the kanidm package must have some required patches applied to it
|
||||
{
|
||||
assertion =
|
||||
(cfg.provision.enable
|
||||
(
|
||||
cfg.provision.enable
|
||||
&& (
|
||||
cfg.provision.adminPasswordFile
|
||||
!= null
|
||||
cfg.provision.adminPasswordFile != null
|
||||
|| cfg.provision.idmAdminPasswordFile != null
|
||||
|| any (x: x.basicSecretFile != null) (attrValues (filterPresent cfg.provision.systems.oauth2))
|
||||
))
|
||||
)
|
||||
)
|
||||
-> cfg.package.enableSecretProvisioning;
|
||||
message = ''
|
||||
Specifying an admin account password or oauth2 basicSecretFile requires kanidm to be built with the secret provisioning patches.
|
||||
|
@@ -669,56 +714,80 @@ in {
|
|||
'';
|
||||
}
|
||||
# Entity names must be globally unique:
|
||||
(let
|
||||
# Filter all names that occurred in more than one entity type.
|
||||
duplicateNames = filterAttrs (_: v: builtins.length v > 1) entitiesByName;
|
||||
in {
|
||||
assertion = cfg.provision.enable -> duplicateNames == {};
|
||||
message = ''
|
||||
services.kanidm.provision requires all entity names (group, person, oauth2, ...) to be unique!
|
||||
${concatLines (mapAttrsToList (name: xs: " - '${name}' used as: ${toString xs}") duplicateNames)}'';
|
||||
})
|
||||
(
|
||||
let
|
||||
# Filter all names that occurred in more than one entity type.
|
||||
duplicateNames = filterAttrs (_: v: builtins.length v > 1) entitiesByName;
|
||||
in
|
||||
{
|
||||
assertion = cfg.provision.enable -> duplicateNames == { };
|
||||
message = ''
|
||||
services.kanidm.provision requires all entity names (group, person, oauth2, ...) to be unique!
|
||||
${concatLines (
|
||||
mapAttrsToList (name: xs: " - '${name}' used as: ${toString xs}") duplicateNames
|
||||
)}'';
|
||||
}
|
||||
)
|
||||
]
|
||||
++ flip mapAttrsToList (filterPresent cfg.provision.persons) (
|
||||
person: personCfg:
|
||||
assertGroupsKnown "services.kanidm.provision.persons.${person}.groups" personCfg.groups
|
||||
assertGroupsKnown "services.kanidm.provision.persons.${person}.groups" personCfg.groups
|
||||
)
|
||||
++ flip mapAttrsToList (filterPresent cfg.provision.groups) (
|
||||
group: groupCfg:
|
||||
assertEntitiesKnown "services.kanidm.provision.groups.${group}.members" groupCfg.members
|
||||
assertEntitiesKnown "services.kanidm.provision.groups.${group}.members" groupCfg.members
|
||||
)
|
||||
++ concatLists (flip mapAttrsToList (filterPresent cfg.provision.systems.oauth2) (
|
||||
oauth2: oauth2Cfg:
|
||||
++ concatLists (
|
||||
flip mapAttrsToList (filterPresent cfg.provision.systems.oauth2) (
|
||||
oauth2: oauth2Cfg:
|
||||
[
|
||||
(assertGroupsKnown "services.kanidm.provision.systems.oauth2.${oauth2}.scopeMaps" (attrNames oauth2Cfg.scopeMaps))
|
||||
(assertGroupsKnown "services.kanidm.provision.systems.oauth2.${oauth2}.supplementaryScopeMaps" (attrNames oauth2Cfg.supplementaryScopeMaps))
|
||||
(assertGroupsKnown "services.kanidm.provision.systems.oauth2.${oauth2}.scopeMaps" (
|
||||
attrNames oauth2Cfg.scopeMaps
|
||||
))
|
||||
(assertGroupsKnown "services.kanidm.provision.systems.oauth2.${oauth2}.supplementaryScopeMaps" (
|
||||
attrNames oauth2Cfg.supplementaryScopeMaps
|
||||
))
|
||||
]
|
||||
++ concatLists (flip mapAttrsToList oauth2Cfg.claimMaps (claim: claimCfg: [
|
||||
(assertGroupsKnown "services.kanidm.provision.systems.oauth2.${oauth2}.claimMaps.${claim}.valuesByGroup" (attrNames claimCfg.valuesByGroup))
|
||||
# At least one group must map to a value in each claim map
|
||||
{
|
||||
assertion = (cfg.provision.enable && cfg.enableServer) -> any (xs: xs != []) (attrValues claimCfg.valuesByGroup);
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2}.claimMaps.${claim} does not specify any values for any group";
|
||||
}
|
||||
# Public clients cannot define a basic secret
|
||||
{
|
||||
assertion = (cfg.provision.enable && cfg.enableServer && oauth2Cfg.public) -> oauth2Cfg.basicSecretFile == null;
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2} is a public client and thus cannot specify a basic secret";
|
||||
}
|
||||
# Public clients cannot disable PKCE
|
||||
{
|
||||
assertion = (cfg.provision.enable && cfg.enableServer && oauth2Cfg.public) -> !oauth2Cfg.allowInsecureClientDisablePkce;
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2} is a public client and thus cannot disable PKCE";
|
||||
}
|
||||
# Non-public clients cannot enable localhost redirects
|
||||
{
|
||||
assertion = (cfg.provision.enable && cfg.enableServer && !oauth2Cfg.public) -> !oauth2Cfg.enableLocalhostRedirects;
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2} is a non-public client and thus cannot enable localhost redirects";
|
||||
}
|
||||
]))
|
||||
));
|
||||
++ concatLists (
|
||||
flip mapAttrsToList oauth2Cfg.claimMaps (
|
||||
claim: claimCfg: [
|
||||
(assertGroupsKnown "services.kanidm.provision.systems.oauth2.${oauth2}.claimMaps.${claim}.valuesByGroup" (
|
||||
attrNames claimCfg.valuesByGroup
|
||||
))
|
||||
# At least one group must map to a value in each claim map
|
||||
{
|
||||
assertion =
|
||||
(cfg.provision.enable && cfg.enableServer)
|
||||
-> any (xs: xs != [ ]) (attrValues claimCfg.valuesByGroup);
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2}.claimMaps.${claim} does not specify any values for any group";
|
||||
}
|
||||
# Public clients cannot define a basic secret
|
||||
{
|
||||
assertion =
|
||||
(cfg.provision.enable && cfg.enableServer && oauth2Cfg.public) -> oauth2Cfg.basicSecretFile == null;
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2} is a public client and thus cannot specify a basic secret";
|
||||
}
|
||||
# Public clients cannot disable PKCE
|
||||
{
|
||||
assertion =
|
||||
(cfg.provision.enable && cfg.enableServer && oauth2Cfg.public)
|
||||
-> !oauth2Cfg.allowInsecureClientDisablePkce;
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2} is a public client and thus cannot disable PKCE";
|
||||
}
|
||||
# Non-public clients cannot enable localhost redirects
|
||||
{
|
||||
assertion =
|
||||
(cfg.provision.enable && cfg.enableServer && !oauth2Cfg.public)
|
||||
-> !oauth2Cfg.enableLocalhostRedirects;
|
||||
message = "services.kanidm.provision.systems.oauth2.${oauth2} is a non-public client and thus cannot enable localhost redirects";
|
||||
}
|
||||
]
|
||||
)
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
environment.systemPackages = mkIf cfg.enableClient [cfg.package];
|
||||
environment.systemPackages = mkIf cfg.enableClient [ cfg.package ];
|
||||
|
||||
systemd.tmpfiles.settings."10-kanidm" = {
|
||||
${cfg.serverSettings.online_backup.path}.d = {
|
||||
|
@ -730,14 +799,16 @@ in {
|
|||
|
||||
systemd.services.kanidm = mkIf cfg.enableServer {
|
||||
description = "kanidm identity management daemon";
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["network.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = mkMerge [
|
||||
# Merge paths and ignore existing prefixes needs to sidestep mkMerge
|
||||
(defaultServiceConfig
|
||||
(
|
||||
defaultServiceConfig
|
||||
// {
|
||||
BindReadOnlyPaths = mergePaths (defaultServiceConfig.BindReadOnlyPaths ++ certPaths);
|
||||
})
|
||||
}
|
||||
)
|
||||
{
|
||||
StateDirectory = "kanidm";
|
||||
StateDirectoryMode = "0700";
|
||||
|
@ -754,13 +825,17 @@ in {
|
|||
cfg.serverSettings.online_backup.path
|
||||
];
|
||||
|
||||
AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
|
||||
CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
|
||||
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
|
||||
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
|
||||
# This would otherwise override the CAP_NET_BIND_SERVICE capability.
|
||||
PrivateUsers = mkForce false;
|
||||
# Port needs to be exposed to the host network
|
||||
PrivateNetwork = mkForce false;
|
||||
RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
|
||||
RestrictAddressFamilies = [
|
||||
"AF_INET"
|
||||
"AF_INET6"
|
||||
"AF_UNIX"
|
||||
];
|
||||
TemporaryFileSystem = "/:ro";
|
||||
}
|
||||
];
|
||||
|
@ -769,9 +844,12 @@ in {
|
|||
|
||||
systemd.services.kanidm-unixd = mkIf cfg.enablePam {
|
||||
description = "Kanidm PAM daemon";
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["network.target"];
|
||||
restartTriggers = [unixConfigFile clientConfigFile];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
restartTriggers = [
|
||||
unixConfigFile
|
||||
clientConfigFile
|
||||
];
|
||||
serviceConfig = mkMerge [
|
||||
defaultServiceConfig
|
||||
{
|
||||
|
@ -796,7 +874,11 @@ in {
|
|||
];
|
||||
# Needs to connect to kanidmd
|
||||
PrivateNetwork = mkForce false;
|
||||
RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
|
||||
RestrictAddressFamilies = [
|
||||
"AF_INET"
|
||||
"AF_INET6"
|
||||
"AF_UNIX"
|
||||
];
|
||||
TemporaryFileSystem = "/:ro";
|
||||
}
|
||||
];
|
||||
|
@ -805,10 +887,16 @@ in {
|
|||
|
||||
systemd.services.kanidm-unixd-tasks = mkIf cfg.enablePam {
|
||||
description = "Kanidm PAM home management daemon";
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["network.target" "kanidm-unixd.service"];
|
||||
partOf = ["kanidm-unixd.service"];
|
||||
restartTriggers = [unixConfigFile clientConfigFile];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [
|
||||
"network.target"
|
||||
"kanidm-unixd.service"
|
||||
];
|
||||
partOf = [ "kanidm-unixd.service" ];
|
||||
restartTriggers = [
|
||||
unixConfigFile
|
||||
clientConfigFile
|
||||
];
|
||||
serviceConfig = {
|
||||
ExecStart = "${cfg.package}/bin/kanidm_unixd_tasks";
|
||||
|
||||
|
@ -828,13 +916,18 @@ in {
|
|||
"/run/kanidm-unixd:/var/run/kanidm-unixd"
|
||||
];
|
||||
# CAP_DAC_OVERRIDE is needed to ignore ownership of unixd socket
|
||||
CapabilityBoundingSet = ["CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_DAC_READ_SEARCH"];
|
||||
CapabilityBoundingSet = [
|
||||
"CAP_CHOWN"
|
||||
"CAP_FOWNER"
|
||||
"CAP_DAC_OVERRIDE"
|
||||
"CAP_DAC_READ_SEARCH"
|
||||
];
|
||||
IPAddressDeny = "any";
|
||||
# Need access to users
|
||||
PrivateUsers = false;
|
||||
# Need access to home directories
|
||||
ProtectHome = false;
|
||||
RestrictAddressFamilies = ["AF_UNIX"];
|
||||
RestrictAddressFamilies = [ "AF_UNIX" ];
|
||||
TemporaryFileSystem = "/:ro";
|
||||
Restart = "on-failure";
|
||||
};
|
||||
|
@ -843,29 +936,21 @@ in {
|
|||
|
||||
# These paths are hardcoded
|
||||
environment.etc = mkMerge [
|
||||
(mkIf cfg.enableServer {
|
||||
"kanidm/server.toml".source = serverConfigFile;
|
||||
})
|
||||
(mkIf cfg.enableServer { "kanidm/server.toml".source = serverConfigFile; })
|
||||
(mkIf options.services.kanidm.clientSettings.isDefined {
|
||||
"kanidm/config".source = clientConfigFile;
|
||||
})
|
||||
(mkIf cfg.enablePam {
|
||||
"kanidm/unixd".source = unixConfigFile;
|
||||
})
|
||||
(mkIf cfg.enablePam { "kanidm/unixd".source = unixConfigFile; })
|
||||
];
|
||||
|
||||
system.nssModules = mkIf cfg.enablePam [cfg.package];
|
||||
system.nssModules = mkIf cfg.enablePam [ cfg.package ];
|
||||
|
||||
system.nssDatabases.group = optional cfg.enablePam "kanidm";
|
||||
system.nssDatabases.passwd = optional cfg.enablePam "kanidm";
|
||||
|
||||
users.groups = mkMerge [
|
||||
(mkIf cfg.enableServer {
|
||||
kanidm = {};
|
||||
})
|
||||
(mkIf cfg.enablePam {
|
||||
kanidm-unixd = {};
|
||||
})
|
||||
(mkIf cfg.enableServer { kanidm = { }; })
|
||||
(mkIf cfg.enablePam { kanidm-unixd = { }; })
|
||||
];
|
||||
users.users = mkMerge [
|
||||
(mkIf cfg.enableServer {
|
||||
|
@ -873,7 +958,7 @@ in {
|
|||
description = "Kanidm server";
|
||||
isSystemUser = true;
|
||||
group = "kanidm";
|
||||
packages = [cfg.package];
|
||||
packages = [ cfg.package ];
|
||||
};
|
||||
})
|
||||
(mkIf cfg.enablePam {
|
||||
|
@ -886,6 +971,10 @@ in {
|
|||
];
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [erictapen Flakebi oddlama];
|
||||
meta.maintainers = with lib.maintainers; [
|
||||
erictapen
|
||||
Flakebi
|
||||
oddlama
|
||||
];
|
||||
meta.buildDocsInSandbox = false;
|
||||
}
|
||||
|
|
|
@ -1,10 +1,8 @@
|
|||
{lib, ...}: let
|
||||
inherit
|
||||
(lib)
|
||||
mkOption
|
||||
types
|
||||
;
|
||||
in {
|
||||
{ lib, ... }:
|
||||
let
|
||||
inherit (lib) mkOption types;
|
||||
in
|
||||
{
|
||||
options.node = {
|
||||
secretsDir = mkOption {
|
||||
description = "Path to the secrets directory for this node.";
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
attrNames
|
||||
getExe
|
||||
literalExpression
|
||||
|
@ -22,8 +22,7 @@
|
|||
versionOlder
|
||||
;
|
||||
|
||||
inherit
|
||||
(lib.types)
|
||||
inherit (lib.types)
|
||||
attrsOf
|
||||
port
|
||||
str
|
||||
|
@ -36,7 +35,8 @@
|
|||
kernel = config.boot.kernelPackages;
|
||||
|
||||
cfg = config.services.netbird;
|
||||
in {
|
||||
in
|
||||
{
|
||||
meta.maintainers = with maintainers; [
|
||||
misuzu
|
||||
thubrecht
|
||||
|
@ -46,16 +46,13 @@ in {
|
|||
|
||||
options.services.netbird = {
|
||||
enable = mkEnableOption (lib.mdDoc "Netbird daemon");
|
||||
package = mkPackageOption pkgs "netbird" {};
|
||||
package = mkPackageOption pkgs "netbird" { };
|
||||
|
||||
tunnels = mkOption {
|
||||
type = attrsOf (
|
||||
submodule (
|
||||
{ name, config, ... }:
|
||||
{
|
||||
name,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options = {
|
||||
port = mkOption {
|
||||
type = port;
|
||||
|
@ -111,7 +108,7 @@ in {
|
|||
}
|
||||
)
|
||||
);
|
||||
default = {};
|
||||
default = { };
|
||||
description = ''
|
||||
Attribute set of Netbird tunnels, each one will spawn a daemon listening on ...
|
||||
'';
|
||||
|
@ -124,106 +121,99 @@ in {
|
|||
services.netbird.tunnels.wt0.stateDir = "netbird";
|
||||
})
|
||||
|
||||
(mkIf (cfg.tunnels != {}) {
|
||||
(mkIf (cfg.tunnels != { }) {
|
||||
boot.extraModulePackages = optional (versionOlder kernel.kernel.version "5.6") kernel.wireguard;
|
||||
|
||||
environment.systemPackages = [cfg.package];
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
networking.dhcpcd.denyInterfaces = attrNames cfg.tunnels;
|
||||
|
||||
systemd.network.networks = mkIf config.networking.useNetworkd (
|
||||
mapAttrs'
|
||||
(
|
||||
mapAttrs' (
|
||||
name: _:
|
||||
nameValuePair "50-netbird-${name}" {
|
||||
matchConfig = {
|
||||
Name = name;
|
||||
};
|
||||
linkConfig = {
|
||||
Unmanaged = true;
|
||||
ActivationPolicy = "manual";
|
||||
};
|
||||
}
|
||||
)
|
||||
cfg.tunnels
|
||||
nameValuePair "50-netbird-${name}" {
|
||||
matchConfig = {
|
||||
Name = name;
|
||||
};
|
||||
linkConfig = {
|
||||
Unmanaged = true;
|
||||
ActivationPolicy = "manual";
|
||||
};
|
||||
}
|
||||
) cfg.tunnels
|
||||
);
|
||||
|
||||
systemd.services =
|
||||
mapAttrs'
|
||||
(
|
||||
name: {
|
||||
environment,
|
||||
stateDir,
|
||||
environmentFile,
|
||||
userAccess,
|
||||
...
|
||||
}:
|
||||
nameValuePair "netbird-${name}" {
|
||||
description = "A WireGuard-based mesh network that connects your devices into a single private network";
|
||||
systemd.services = mapAttrs' (
|
||||
name:
|
||||
{
|
||||
environment,
|
||||
stateDir,
|
||||
environmentFile,
|
||||
userAccess,
|
||||
...
|
||||
}:
|
||||
nameValuePair "netbird-${name}" {
|
||||
description = "A WireGuard-based mesh network that connects your devices into a single private network";
|
||||
|
||||
documentation = ["https://netbird.io/docs/"];
|
||||
documentation = [ "https://netbird.io/docs/" ];
|
||||
|
||||
after = ["network.target"];
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
path = with pkgs; [openresolv];
|
||||
path = with pkgs; [ openresolv ];
|
||||
|
||||
inherit environment;
|
||||
inherit environment;
|
||||
|
||||
serviceConfig = {
|
||||
EnvironmentFile = mkIf (environmentFile != null) environmentFile;
|
||||
ExecStart = "${getExe cfg.package} service run";
|
||||
Restart = "always";
|
||||
RuntimeDirectory = stateDir;
|
||||
StateDirectory = stateDir;
|
||||
StateDirectoryMode = "0700";
|
||||
WorkingDirectory = "/var/lib/${stateDir}";
|
||||
RuntimeDirectoryMode =
|
||||
if userAccess
|
||||
then "0755"
|
||||
else "0750";
|
||||
serviceConfig = {
|
||||
EnvironmentFile = mkIf (environmentFile != null) environmentFile;
|
||||
ExecStart = "${getExe cfg.package} service run";
|
||||
Restart = "always";
|
||||
RuntimeDirectory = stateDir;
|
||||
StateDirectory = stateDir;
|
||||
StateDirectoryMode = "0700";
|
||||
WorkingDirectory = "/var/lib/${stateDir}";
|
||||
RuntimeDirectoryMode = if userAccess then "0755" else "0750";
|
||||
|
||||
# hardening
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
NoNewPrivileges = true;
|
||||
PrivateMounts = true;
|
||||
PrivateTmp = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = false; # needed to load wg module for kernel-mode WireGuard
|
||||
ProtectKernelTunables = false;
|
||||
ProtectSystem = true;
|
||||
RemoveIPC = true;
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
# hardening
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
NoNewPrivileges = true;
|
||||
PrivateMounts = true;
|
||||
PrivateTmp = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = false; # needed to load wg module for kernel-mode WireGuard
|
||||
ProtectKernelTunables = false;
|
||||
ProtectSystem = true;
|
||||
RemoveIPC = true;
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
|
||||
# Hardening
|
||||
#CapabilityBoundingSet = "";
|
||||
#PrivateUsers = true;
|
||||
#ProtectProc = "invisible";
|
||||
#ProcSubset = "pid";
|
||||
#RestrictAddressFamilies = [
|
||||
# "AF_INET"
|
||||
# "AF_INET6"
|
||||
# "AF_NETLINK"
|
||||
#];
|
||||
#SystemCallArchitectures = "native";
|
||||
#SystemCallFilter = [
|
||||
# "@system-service"
|
||||
# "@pkey"
|
||||
#];
|
||||
UMask = "0077";
|
||||
};
|
||||
# Hardening
|
||||
#CapabilityBoundingSet = "";
|
||||
#PrivateUsers = true;
|
||||
#ProtectProc = "invisible";
|
||||
#ProcSubset = "pid";
|
||||
#RestrictAddressFamilies = [
|
||||
# "AF_INET"
|
||||
# "AF_INET6"
|
||||
# "AF_NETLINK"
|
||||
#];
|
||||
#SystemCallArchitectures = "native";
|
||||
#SystemCallFilter = [
|
||||
# "@system-service"
|
||||
# "@pkey"
|
||||
#];
|
||||
UMask = "0077";
|
||||
};
|
||||
|
||||
stopIfChanged = false;
|
||||
}
|
||||
)
|
||||
cfg.tunnels;
|
||||
stopIfChanged = false;
|
||||
}
|
||||
) cfg.tunnels;
|
||||
})
|
||||
];
|
||||
}
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
inputs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
mapAttrs
|
||||
assertMsg
|
||||
types
|
||||
|
@ -16,22 +16,25 @@
|
|||
# If the given expression is a bare set, it will be wrapped in a function,
|
||||
# so that the imported file can always be applied to the inputs, similar to
|
||||
# how modules can be functions or sets.
|
||||
constSet = x:
|
||||
if builtins.isAttrs x
|
||||
then (_: x)
|
||||
else x;
|
||||
constSet = x: if builtins.isAttrs x then (_: x) else x;
|
||||
|
||||
rageImportEncrypted = assert assertMsg (builtins ? extraBuiltins.rageImportEncrypted) "The rageImportEncrypted extra plugin is not loaded";
|
||||
rageImportEncrypted =
|
||||
assert assertMsg (
|
||||
builtins ? extraBuiltins.rageImportEncrypted
|
||||
) "The rageImportEncrypted extra plugin is not loaded";
|
||||
builtins.extraBuiltins.rageImportEncrypted;
|
||||
# This "imports" an encrypted .nix.age file
|
||||
importEncrypted = path:
|
||||
importEncrypted =
|
||||
path:
|
||||
constSet (
|
||||
if builtins.pathExists path
|
||||
then rageImportEncrypted inputs.self.secretsConfig.masterIdentities path
|
||||
else {}
|
||||
if builtins.pathExists path then
|
||||
rageImportEncrypted inputs.self.secretsConfig.masterIdentities path
|
||||
else
|
||||
{ }
|
||||
);
|
||||
cfg = config.secrets;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.secrets = {
|
||||
defineRageBuiltins = mkOption {
|
||||
default = true;
|
||||
|
@ -43,7 +46,7 @@ in {
|
|||
};
|
||||
|
||||
secretFiles = mkOption {
|
||||
default = {};
|
||||
default = { };
|
||||
type = types.attrsOf types.path;
|
||||
example = literalExpression "{ local = ./secrets.nix.age; }";
|
||||
description = mdDoc ''
|
||||
|
@ -56,28 +59,30 @@ in {
|
|||
|
||||
secrets = mkOption {
|
||||
readOnly = true;
|
||||
default =
|
||||
mapAttrs (_: x: importEncrypted x inputs) cfg.secretFiles;
|
||||
default = mapAttrs (_: x: importEncrypted x inputs) cfg.secretFiles;
|
||||
description = mdDoc ''
|
||||
the secrets decrypted from the secretFiles
|
||||
'';
|
||||
};
|
||||
};
|
||||
config.home-manager.sharedModules = [
|
||||
({config, ...}: {
|
||||
options = {
|
||||
userSecretsFile = mkOption {
|
||||
default = ../users/${config._module.args.name}/secrets.nix.age;
|
||||
type = types.path;
|
||||
description = "The global secrets attribute that should be exposed to the user";
|
||||
(
|
||||
{ config, ... }:
|
||||
{
|
||||
options = {
|
||||
userSecretsFile = mkOption {
|
||||
default = ../users/${config._module.args.name}/secrets.nix.age;
|
||||
type = types.path;
|
||||
description = "The global secrets attribute that should be exposed to the user";
|
||||
};
|
||||
userSecrets = mkOption {
|
||||
readOnly = true;
|
||||
default = importEncrypted config.userSecretsFile inputs;
|
||||
type = types.unspecified;
|
||||
description = "User secrets";
|
||||
};
|
||||
};
|
||||
userSecrets = mkOption {
|
||||
readOnly = true;
|
||||
default = importEncrypted config.userSecretsFile inputs;
|
||||
type = types.unspecified;
|
||||
description = "User secrets";
|
||||
};
|
||||
};
|
||||
})
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
mkOption
|
||||
types
|
||||
flip
|
||||
|
@ -19,83 +19,85 @@
|
|||
"x-systemd.device-timeout=5s"
|
||||
"x-systemd.mount-timeout=5s"
|
||||
];
|
||||
in {
|
||||
in
|
||||
{
|
||||
# Give users the ability to add their own smb shares
|
||||
home-manager.sharedModules = [
|
||||
{
|
||||
options.home.smb = mkOption {
|
||||
description = "Samba shares to be mountable under $HOME/smb";
|
||||
default = [];
|
||||
type = types.listOf (types.submodule ({config, ...}: {
|
||||
options = {
|
||||
localPath = mkOption {
|
||||
description = "The path under which the share will be mounted. Defaults to the remotePath";
|
||||
type = types.str;
|
||||
default = config.remotePath;
|
||||
};
|
||||
address = mkOption {
|
||||
description = "The remote share address";
|
||||
type = types.str;
|
||||
example = "10.1.2.5";
|
||||
};
|
||||
remotePath = mkOption {
|
||||
description = "The remote share path";
|
||||
type = types.str;
|
||||
example = "data-10";
|
||||
};
|
||||
credentials = mkOption {
|
||||
description = "A smb credential file to access the remote share";
|
||||
type = types.path;
|
||||
};
|
||||
automatic = mkOption {
|
||||
description = "Whether this share should be automatically mounted on boot";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
};
|
||||
}));
|
||||
default = [ ];
|
||||
type = types.listOf (
|
||||
types.submodule (
|
||||
{ config, ... }:
|
||||
{
|
||||
options = {
|
||||
localPath = mkOption {
|
||||
description = "The path under which the share will be mounted. Defaults to the remotePath";
|
||||
type = types.str;
|
||||
default = config.remotePath;
|
||||
};
|
||||
address = mkOption {
|
||||
description = "The remote share address";
|
||||
type = types.str;
|
||||
example = "10.1.2.5";
|
||||
};
|
||||
remotePath = mkOption {
|
||||
description = "The remote share path";
|
||||
type = types.str;
|
||||
example = "data-10";
|
||||
};
|
||||
credentials = mkOption {
|
||||
description = "A smb credential file to access the remote share";
|
||||
type = types.path;
|
||||
};
|
||||
automatic = mkOption {
|
||||
description = "Whether this share should be automatically mounted on boot";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
}
|
||||
];
|
||||
|
||||
imports = [
|
||||
{
|
||||
environment.systemPackages = [pkgs.cifs-utils];
|
||||
fileSystems =
|
||||
mkMerge
|
||||
(
|
||||
flip
|
||||
concatMap
|
||||
(attrNames config.home-manager.users)
|
||||
(
|
||||
user: let
|
||||
parentPath = "/home/${user}/smb";
|
||||
cfg = config.home-manager.users.${user}.home.smb;
|
||||
inherit (config.users.users.${user}) uid;
|
||||
inherit (config.users.groups.${user}) gid;
|
||||
in
|
||||
flip map cfg (
|
||||
cfg: {
|
||||
"${parentPath}/${cfg.localPath}" = let
|
||||
options =
|
||||
baseOptions
|
||||
++ [
|
||||
"uid=${toString uid}"
|
||||
"gid=${toString gid}"
|
||||
"file_mode=0600"
|
||||
"dir_mode=0700"
|
||||
"credentials=${cfg.credentials}"
|
||||
]
|
||||
++ (optional (!cfg.automatic) "noauto");
|
||||
in {
|
||||
inherit options;
|
||||
device = "//${cfg.address}/${cfg.remotePath}";
|
||||
fsType = "cifs";
|
||||
};
|
||||
}
|
||||
)
|
||||
)
|
||||
);
|
||||
environment.systemPackages = [ pkgs.cifs-utils ];
|
||||
fileSystems = mkMerge (
|
||||
flip concatMap (attrNames config.home-manager.users) (
|
||||
user:
|
||||
let
|
||||
parentPath = "/home/${user}/smb";
|
||||
cfg = config.home-manager.users.${user}.home.smb;
|
||||
inherit (config.users.users.${user}) uid;
|
||||
inherit (config.users.groups.${user}) gid;
|
||||
in
|
||||
flip map cfg (cfg: {
|
||||
"${parentPath}/${cfg.localPath}" =
|
||||
let
|
||||
options =
|
||||
baseOptions
|
||||
++ [
|
||||
"uid=${toString uid}"
|
||||
"gid=${toString gid}"
|
||||
"file_mode=0600"
|
||||
"dir_mode=0700"
|
||||
"credentials=${cfg.credentials}"
|
||||
]
|
||||
++ (optional (!cfg.automatic) "noauto");
|
||||
in
|
||||
{
|
||||
inherit options;
|
||||
device = "//${cfg.address}/${cfg.remotePath}";
|
||||
fsType = "cifs";
|
||||
};
|
||||
})
|
||||
)
|
||||
);
|
||||
}
|
||||
];
|
||||
}
|
||||
|
|
nix/devshell.nix
|
@ -1,70 +1,69 @@
|
|||
{
|
||||
self,
|
||||
...
|
||||
}: system: let
|
||||
{ self, ... }:
|
||||
system:
|
||||
let
|
||||
pkgs = self.pkgs.${system};
|
||||
in
|
||||
pkgs.devshell.mkShell {
|
||||
name = "nix-config";
|
||||
packages = with pkgs; [
|
||||
# Nix
|
||||
nil
|
||||
pkgs.devshell.mkShell {
|
||||
name = "nix-config";
|
||||
packages = with pkgs; [
|
||||
# Nix
|
||||
nil
|
||||
|
||||
# Misc
|
||||
shellcheck
|
||||
pre-commit
|
||||
rage
|
||||
nix
|
||||
nix-diff
|
||||
nix-update
|
||||
];
|
||||
commands = [
|
||||
{
|
||||
package = pkgs.deploy;
|
||||
help = "build and deploy nix configurations";
|
||||
}
|
||||
{
|
||||
package = pkgs.agenix-rekey;
|
||||
help = "Edit and rekey repository secrets";
|
||||
}
|
||||
{
|
||||
package = pkgs.nixfmt-rfc-style;
|
||||
help = "Format nix code";
|
||||
}
|
||||
{
|
||||
package = pkgs.statix;
|
||||
help = "Linter for nix";
|
||||
}
|
||||
{
|
||||
package = pkgs.deadnix;
|
||||
help = "Remove dead nix code";
|
||||
}
|
||||
{
|
||||
package = pkgs.nix-tree;
|
||||
help = "Show nix closure tree";
|
||||
}
|
||||
{
|
||||
package = pkgs.update-nix-fetchgit;
|
||||
help = "Update fetcher inside nix files";
|
||||
}
|
||||
{
|
||||
package = pkgs.nvd;
|
||||
help = "List package differences between systems";
|
||||
}
|
||||
{
|
||||
package = pkgs.vulnix;
|
||||
help = "List vulnerabilities found in your system";
|
||||
}
|
||||
];
|
||||
env = [
|
||||
{
|
||||
name = "NIX_CONFIG";
|
||||
value = ''
|
||||
plugin-files = ${pkgs.nix-plugins}/lib/nix/plugins
|
||||
extra-builtins-file = ${../nix}/extra-builtins.nix
|
||||
'';
|
||||
}
|
||||
];
|
||||
# Misc
|
||||
shellcheck
|
||||
pre-commit
|
||||
rage
|
||||
nix
|
||||
nix-diff
|
||||
nix-update
|
||||
];
|
||||
commands = [
|
||||
{
|
||||
package = pkgs.deploy;
|
||||
help = "build and deploy nix configurations";
|
||||
}
|
||||
{
|
||||
package = pkgs.agenix-rekey;
|
||||
help = "Edit and rekey repository secrets";
|
||||
}
|
||||
{
|
||||
package = pkgs.nixfmt-rfc-style;
|
||||
help = "Format nix code";
|
||||
}
|
||||
{
|
||||
package = pkgs.statix;
|
||||
help = "Linter for nix";
|
||||
}
|
||||
{
|
||||
package = pkgs.deadnix;
|
||||
help = "Remove dead nix code";
|
||||
}
|
||||
{
|
||||
package = pkgs.nix-tree;
|
||||
help = "Show nix closure tree";
|
||||
}
|
||||
{
|
||||
package = pkgs.update-nix-fetchgit;
|
||||
help = "Update fetcher inside nix files";
|
||||
}
|
||||
{
|
||||
package = pkgs.nvd;
|
||||
help = "List package differences between systems";
|
||||
}
|
||||
{
|
||||
package = pkgs.vulnix;
|
||||
help = "List vulnerabilities found in your system";
|
||||
}
|
||||
];
|
||||
env = [
|
||||
{
|
||||
name = "NIX_CONFIG";
|
||||
value = ''
|
||||
plugin-files = ${pkgs.nix-plugins}/lib/nix/plugins
|
||||
extra-builtins-file = ${../nix}/extra-builtins.nix
|
||||
'';
|
||||
}
|
||||
];
|
||||
|
||||
devshell.startup.pre-commit.text = self.checks.${system}.pre-commit-check.shellHook;
|
||||
}
|
||||
devshell.startup.pre-commit.text = self.checks.${system}.pre-commit-check.shellHook;
|
||||
}
|
||||
|
|
|
@ -14,21 +14,40 @@
|
|||
# '';
|
||||
# }
|
||||
# ```
|
||||
{exec, ...}: let
|
||||
{ exec, ... }:
|
||||
let
|
||||
assertMsg = pred: msg: pred || builtins.throw msg;
|
||||
hasSuffix = suffix: content: let
|
||||
lenContent = builtins.stringLength content;
|
||||
lenSuffix = builtins.stringLength suffix;
|
||||
in
|
||||
hasSuffix =
|
||||
suffix: content:
|
||||
let
|
||||
lenContent = builtins.stringLength content;
|
||||
lenSuffix = builtins.stringLength suffix;
|
||||
in
|
||||
lenContent >= lenSuffix && builtins.substring (lenContent - lenSuffix) lenContent content == suffix;
|
||||
in {
|
||||
in
|
||||
{
|
||||
# Instead of calling rage directly here, we call a wrapper script that will cache the output
|
||||
# in a predictable path in /tmp, which allows us to only require the password for each encrypted
|
||||
# file once.
|
||||
rageImportEncrypted = identities: nixFile:
|
||||
assert assertMsg (builtins.isPath nixFile) "The file to decrypt must be given as a path to prevent impurity.";
|
||||
assert assertMsg (hasSuffix ".nix.age" nixFile) "The content of the decrypted file must be a nix expression and should therefore end in .nix.age";
|
||||
exec ([./rage-decrypt-and-cache.sh nixFile] ++ identities);
|
||||
rageImportEncrypted =
|
||||
identities: nixFile:
|
||||
assert assertMsg (builtins.isPath nixFile)
|
||||
"The file to decrypt must be given as a path to prevent impurity.";
|
||||
assert assertMsg (hasSuffix ".nix.age" nixFile)
|
||||
"The content of the decrypted file must be a nix expression and should therefore end in .nix.age";
|
||||
exec (
|
||||
[
|
||||
./rage-decrypt-and-cache.sh
|
||||
nixFile
|
||||
]
|
||||
++ identities
|
||||
);
|
||||
# currentSystem
|
||||
unsafeCurrentSystem = exec ["nix" "eval" "--impure" "--expr" "builtins.currentSystem"];
|
||||
unsafeCurrentSystem = exec [
|
||||
"nix"
|
||||
"eval"
|
||||
"--impure"
|
||||
"--expr"
|
||||
"builtins.currentSystem"
|
||||
];
|
||||
}
|
||||
|
|
|
@ -1,11 +1,15 @@
|
|||
{self, ...}: nodeName: nodeAttrs: let
|
||||
{ self, ... }:
|
||||
nodeName: nodeAttrs:
|
||||
let
|
||||
#FIXME inherit nodeAttrs. system;
|
||||
system = "x86_64-linux";
|
||||
pkgs = self.pkgs.${system};
|
||||
|
||||
disko-script = pkgs.writeShellScriptBin "disko-script" "${nodeAttrs.config.system.build.diskoScript}";
|
||||
disko-script = pkgs.writeShellScriptBin "disko-script" "${nodeAttrs.config.system.build.diskoScript
|
||||
}";
|
||||
disko-mount = pkgs.writeShellScriptBin "disko-mount" "${nodeAttrs.config.system.build.mountScript}";
|
||||
disko-format = pkgs.writeShellScriptBin "disko-format" "${nodeAttrs.config.system.build.formatScript}";
|
||||
disko-format = pkgs.writeShellScriptBin "disko-format" "${nodeAttrs.config.system.build.formatScript
|
||||
}";
|
||||
|
||||
install-system = pkgs.writeShellScriptBin "install-system" ''
|
||||
set -euo pipefail
|
||||
|
@ -28,7 +32,8 @@
|
|||
install-system
|
||||
];
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
# Everything required for the installer as a single package,
|
||||
# so it can be used from an existing live system by copying the derivation.
|
||||
packages.${system}.installer-package.${nodeName} = installer-package;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
inputs: let
|
||||
inputs:
|
||||
let
|
||||
inherit (inputs) self;
|
||||
inherit
|
||||
(inputs.nixpkgs.lib)
|
||||
inherit (inputs.nixpkgs.lib)
|
||||
concatMapAttrs
|
||||
filterAttrs
|
||||
flip
|
||||
|
@ -12,9 +12,12 @@ inputs: let
|
|||
;
|
||||
|
||||
# Creates a new nixosSystem with the correct specialArgs, pkgs and name definition
|
||||
mkHost = {minimal}: name: let
|
||||
pkgs = self.pkgs.x86_64-linux;
|
||||
in
|
||||
mkHost =
|
||||
{ minimal }:
|
||||
name:
|
||||
let
|
||||
pkgs = self.pkgs.x86_64-linux;
|
||||
in
|
||||
nixosSystem {
|
||||
specialArgs = {
|
||||
# Use the correct instance lib that has our overlays
|
||||
|
@ -42,22 +45,30 @@ inputs: let
|
|||
# to instanciate hosts correctly.
|
||||
hosts = builtins.attrNames (filterAttrs (_: type: type == "directory") (builtins.readDir ../hosts));
|
||||
# Process each nixosHosts declaration and generatea nixosSystem definitions
|
||||
nixosConfigurations = genAttrs hosts (mkHost {minimal = false;});
|
||||
minimalConfigurations = genAttrs hosts (mkHost {minimal = true;});
|
||||
nixosConfigurations = genAttrs hosts (mkHost {
|
||||
minimal = false;
|
||||
});
|
||||
minimalConfigurations = genAttrs hosts (mkHost {
|
||||
minimal = true;
|
||||
});
|
||||
|
||||
# True NixOS nodes can define additional guest nodes that are built
|
||||
# together with it. We collect all defined guests from each node here
|
||||
# to allow accessing any node via the unified attribute `nodes`.
|
||||
guestConfigurations = flip concatMapAttrs self.nixosConfigurations (_: node:
|
||||
flip mapAttrs' (node.config.guests or {}) (
|
||||
guestConfigurations = flip concatMapAttrs self.nixosConfigurations (
|
||||
_: node:
|
||||
flip mapAttrs' (node.config.guests or { }) (
|
||||
guestName: guestDef:
|
||||
nameValuePair guestDef.nodeName (
|
||||
if guestDef.backend == "microvm"
|
||||
then node.config.microvm.vms.${guestName}.config
|
||||
else node.config.containers.${guestName}.nixosConfiguration
|
||||
)
|
||||
));
|
||||
in {
|
||||
nameValuePair guestDef.nodeName (
|
||||
if guestDef.backend == "microvm" then
|
||||
node.config.microvm.vms.${guestName}.config
|
||||
else
|
||||
node.config.containers.${guestName}.nixosConfiguration
|
||||
)
|
||||
)
|
||||
);
|
||||
in
|
||||
{
|
||||
inherit
|
||||
hosts
|
||||
nixosConfigurations
|
||||
|
|
|
@@ -1,4 +1,5 @@
{pkgs, ...}: {
{ pkgs, ... }:
{
nix.extraOptions = ''
experimental-features = nix-command flakes recursive-nix
'';
|
||||
|
|
|
@@ -1 +1 @@
{}
{ }
|
||||
|
|
|
@ -71,7 +71,7 @@ stdenv.mkDerivation rec {
|
|||
homepage = "https://actualbudget.com/";
|
||||
license = licenses.mit;
|
||||
mainProgram = "actual-server";
|
||||
maintainers = with maintainers; [patrickdag];
|
||||
platforms = ["x86_64-linux"];
|
||||
maintainers = with maintainers; [ patrickdag ];
|
||||
platforms = [ "x86_64-linux" ];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
{
|
||||
pkgs,
|
||||
fetchurl,
|
||||
}: let
|
||||
{ pkgs, fetchurl }:
|
||||
let
|
||||
name = "awakened-poe-trade";
|
||||
version = "3.22.10003";
|
||||
description = "Path of Exile trading app for price checking";
|
||||
|
@ -23,17 +21,17 @@
|
|||
sha256 = "sha256-fZ3PU+yE1n/RytkPFAXQhU85KNQStYcSrdgw+OYfJRg=";
|
||||
};
|
||||
in
|
||||
pkgs.appimageTools.wrapType2 {
|
||||
name = "awakened-poe-trade";
|
||||
src = fetchurl {
|
||||
url = "https://github.com/SnosMe/awakened-poe-trade/releases/download/v${version}/${file}";
|
||||
hash = "sha256-b+cDOmU0s0MqP5ZgCacmAon8UqDejG4HcOqi+Uf2dEM=";
|
||||
};
|
||||
pkgs.appimageTools.wrapType2 {
|
||||
name = "awakened-poe-trade";
|
||||
src = fetchurl {
|
||||
url = "https://github.com/SnosMe/awakened-poe-trade/releases/download/v${version}/${file}";
|
||||
hash = "sha256-b+cDOmU0s0MqP5ZgCacmAon8UqDejG4HcOqi+Uf2dEM=";
|
||||
};
|
||||
|
||||
extraInstallCommands = ''
|
||||
mkdir -p $out/share/applications
|
||||
cp ${icon} $out/share/applications/awakened-poe-trade.png
|
||||
cp ${desktopEntry} $out/share/applications/${name}.desktop
|
||||
substituteInPlace $out/share/applications/awakened-poe-trade.desktop --replace /share/ $out/share/
|
||||
'';
|
||||
}
|
||||
extraInstallCommands = ''
|
||||
mkdir -p $out/share/applications
|
||||
cp ${icon} $out/share/applications/awakened-poe-trade.png
|
||||
cp ${desktopEntry} $out/share/applications/${name}.desktop
|
||||
substituteInPlace $out/share/applications/awakened-poe-trade.desktop --replace /share/ $out/share/
|
||||
'';
|
||||
}
|
||||
|
|
|
@ -1,39 +1,43 @@
|
|||
[
|
||||
(import ./scripts)
|
||||
(_self: super: {
|
||||
zsh-histdb-skim = super.callPackage ./zsh-histdb-skim.nix {};
|
||||
zsh-histdb = super.callPackage ./zsh-histdb.nix {};
|
||||
actual = super.callPackage ./actual.nix {};
|
||||
pr-tracker = super.callPackage ./pr-tracker.nix {};
|
||||
homebox = super.callPackage ./homebox.nix {};
|
||||
deploy = super.callPackage ./deploy.nix {};
|
||||
mongodb-bin = super.callPackage ./mongodb-bin.nix {};
|
||||
awakened-poe-trade = super.callPackage ./awakened-poe-trade.nix {};
|
||||
neovim-clean = super.neovim-unwrapped.overrideAttrs (_neovimFinal: neovimPrev: {
|
||||
nativeBuildInputs = (neovimPrev.nativeBuildInputs or []) ++ [super.makeWrapper];
|
||||
postInstall =
|
||||
(neovimPrev.postInstall or "")
|
||||
+ ''
|
||||
wrapProgram $out/bin/nvim --add-flags "--clean"
|
||||
'';
|
||||
});
|
||||
kanidm = super.kanidm.overrideAttrs (old: let
|
||||
provisionSrc = super.fetchFromGitHub {
|
||||
owner = "oddlama";
|
||||
repo = "kanidm-provision";
|
||||
rev = "v1.1.0";
|
||||
hash = "sha256-pFOFFKh3la/sZGXj+pAM8x4SMeffvvbOvTjPeHS1XPU=";
|
||||
};
|
||||
in {
|
||||
patches =
|
||||
old.patches
|
||||
++ [
|
||||
zsh-histdb-skim = super.callPackage ./zsh-histdb-skim.nix { };
|
||||
zsh-histdb = super.callPackage ./zsh-histdb.nix { };
|
||||
actual = super.callPackage ./actual.nix { };
|
||||
pr-tracker = super.callPackage ./pr-tracker.nix { };
|
||||
homebox = super.callPackage ./homebox.nix { };
|
||||
deploy = super.callPackage ./deploy.nix { };
|
||||
mongodb-bin = super.callPackage ./mongodb-bin.nix { };
|
||||
awakened-poe-trade = super.callPackage ./awakened-poe-trade.nix { };
|
||||
neovim-clean = super.neovim-unwrapped.overrideAttrs (
|
||||
_neovimFinal: neovimPrev: {
|
||||
nativeBuildInputs = (neovimPrev.nativeBuildInputs or [ ]) ++ [ super.makeWrapper ];
|
||||
postInstall =
|
||||
(neovimPrev.postInstall or "")
|
||||
+ ''
|
||||
wrapProgram $out/bin/nvim --add-flags "--clean"
|
||||
'';
|
||||
}
|
||||
);
|
||||
kanidm = super.kanidm.overrideAttrs (
|
||||
old:
|
||||
let
|
||||
provisionSrc = super.fetchFromGitHub {
|
||||
owner = "oddlama";
|
||||
repo = "kanidm-provision";
|
||||
rev = "v1.1.0";
|
||||
hash = "sha256-pFOFFKh3la/sZGXj+pAM8x4SMeffvvbOvTjPeHS1XPU=";
|
||||
};
|
||||
in
|
||||
{
|
||||
patches = old.patches ++ [
|
||||
"${provisionSrc}/patches/1.2.0-oauth2-basic-secret-modify.patch"
|
||||
"${provisionSrc}/patches/1.2.0-recover-account.patch"
|
||||
];
|
||||
passthru.enableSecretProvisioning = true;
|
||||
doCheck = false;
|
||||
});
|
||||
kanidm-provision = super.callPackage ./kanidm-provision.nix {};
|
||||
passthru.enableSecretProvisioning = true;
|
||||
doCheck = false;
|
||||
}
|
||||
);
|
||||
kanidm-provision = super.callPackage ./kanidm-provision.nix { };
|
||||
})
|
||||
]
|
||||
|
|
|
@ -3,7 +3,8 @@
|
|||
writeShellApplication,
|
||||
nvd,
|
||||
nix-output-monitor,
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
deploy = writeShellApplication {
|
||||
name = "deploy";
|
||||
text = ''
|
||||
|
@ -166,7 +167,10 @@
|
|||
'';
|
||||
};
|
||||
in
|
||||
symlinkJoin {
|
||||
name = "deploy and build";
|
||||
paths = [deploy build];
|
||||
}
|
||||
symlinkJoin {
|
||||
name = "deploy and build";
|
||||
paths = [
|
||||
deploy
|
||||
build
|
||||
];
|
||||
}
|
||||
|
|
|
@ -7,7 +7,8 @@
|
|||
lib,
|
||||
buildGoModule,
|
||||
fetchFromGitHub,
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
pname = "homebox";
|
||||
version = "0.10.3";
|
||||
src = "${fetchFromGitHub {
|
||||
|
@ -101,38 +102,38 @@
|
|||
outputHash = "sha256-BVZSdc8e6v+paMzMYazEdnKSNw+OnCpjSzGSEKxVl24=";
|
||||
};
|
||||
in
|
||||
buildGoModule {
|
||||
inherit pname version;
|
||||
src = "${src}/backend";
|
||||
buildGoModule {
|
||||
inherit pname version;
|
||||
src = "${src}/backend";
|
||||
|
||||
vendorHash = "sha256-TtFz+dDpoMs3PAQjiYQm1+Q6prn4Hiaf7xqWt41oY7w=";
|
||||
vendorHash = "sha256-TtFz+dDpoMs3PAQjiYQm1+Q6prn4Hiaf7xqWt41oY7w=";
|
||||
|
||||
CGO_ENABLED = 0;
|
||||
GOOS = "linux";
|
||||
doCheck = false;
|
||||
CGO_ENABLED = 0;
|
||||
GOOS = "linux";
|
||||
doCheck = false;
|
||||
|
||||
# options used by upstream:
|
||||
# https://github.com/simulot/immich-go/blob/0.13.2/.goreleaser.yaml
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-extldflags=-static"
|
||||
"-X main.version=${version}"
|
||||
];
|
||||
# options used by upstream:
|
||||
# https://github.com/simulot/immich-go/blob/0.13.2/.goreleaser.yaml
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-extldflags=-static"
|
||||
"-X main.version=${version}"
|
||||
];
|
||||
|
||||
preBuild = ''
|
||||
ldflags+=" -X main.commit=$(cat COMMIT)"
|
||||
ldflags+=" -X main.date=$(cat SOURCE_DATE)"
|
||||
mkdir -p ./app/api/static/public
|
||||
cp -r ${frontend}/* ./app/api/static/public
|
||||
'';
|
||||
preBuild = ''
|
||||
ldflags+=" -X main.commit=$(cat COMMIT)"
|
||||
ldflags+=" -X main.date=$(cat SOURCE_DATE)"
|
||||
mkdir -p ./app/api/static/public
|
||||
cp -r ${frontend}/* ./app/api/static/public
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
mainProgram = "api";
|
||||
homepage = "https://hay-kot.github.io/homebox/";
|
||||
maintainers = with maintainers; [patrickdag];
|
||||
license = licenses.agpl3Only;
|
||||
description = "A inventory and organization system built for the Home User";
|
||||
platforms = platforms.all;
|
||||
};
|
||||
}
|
||||
meta = with lib; {
|
||||
mainProgram = "api";
|
||||
homepage = "https://hay-kot.github.io/homebox/";
|
||||
maintainers = with maintainers; [ patrickdag ];
|
||||
license = licenses.agpl3Only;
|
||||
description = "A inventory and organization system built for the Home User";
|
||||
platforms = platforms.all;
|
||||
};
|
||||
}
|
||||
|
|
|
@ -19,8 +19,11 @@ rustPlatform.buildRustPackage rec {
|
|||
meta = with lib; {
|
||||
description = "A small utility to help with kanidm provisioning";
|
||||
homepage = "https://github.com/oddlama/kanidm-provision";
|
||||
license = with licenses; [asl20 mit];
|
||||
maintainers = with maintainers; [oddlama];
|
||||
license = with licenses; [
|
||||
asl20
|
||||
mit
|
||||
];
|
||||
maintainers = with maintainers; [ oddlama ];
|
||||
mainProgram = "kanidm-provision";
|
||||
};
|
||||
}
|
||||
|
|
|
@ -11,27 +11,26 @@ stdenv.mkDerivation {
|
|||
pname = "mongodb-bin";
|
||||
version = "1.0.0";
|
||||
srcs = [
|
||||
(
|
||||
fetchurl {
|
||||
url = "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2204-6.0.14.tgz";
|
||||
hash = "sha256-1MW3pVIffdxq63gY64ozM1erWM2ou2L8T+MTfG+ZPLg=";
|
||||
}
|
||||
)
|
||||
(
|
||||
fetchurl {
|
||||
url = "https://downloads.mongodb.com/compass/mongosh-2.1.5-linux-x64.tgz";
|
||||
hash = "sha256-R1GGB0ZGqmpJtMUNF2+EJK6iNiChHuoHyOf2vKDcOKA=";
|
||||
}
|
||||
)
|
||||
(fetchurl {
|
||||
url = "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2204-6.0.14.tgz";
|
||||
hash = "sha256-1MW3pVIffdxq63gY64ozM1erWM2ou2L8T+MTfG+ZPLg=";
|
||||
})
|
||||
(fetchurl {
|
||||
url = "https://downloads.mongodb.com/compass/mongosh-2.1.5-linux-x64.tgz";
|
||||
hash = "sha256-R1GGB0ZGqmpJtMUNF2+EJK6iNiChHuoHyOf2vKDcOKA=";
|
||||
})
|
||||
];
|
||||
sourceRoot = ".";
|
||||
nativeBuildInputs = [
|
||||
autoPatchelfHook
|
||||
];
|
||||
nativeBuildInputs = [ autoPatchelfHook ];
|
||||
buildPhase = ''
|
||||
mkdir -p $out/bin
|
||||
cp mongosh-2.1.5-linux-x64/bin/mongosh $out/bin/mongo
|
||||
cp mongodb-linux-x86_64-ubuntu2204-6.0.14/bin/mongod $out/bin/mongod
|
||||
'';
|
||||
buildInputs = [openssl curl xz libgcc];
|
||||
buildInputs = [
|
||||
openssl
|
||||
curl
|
||||
xz
|
||||
libgcc
|
||||
];
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ buildNpmPackage rec {
|
|||
homepage = "https://github.com/ollama-webui/ollama-webui";
|
||||
license = licenses.mit;
|
||||
mainProgram = pname;
|
||||
maintainers = with maintainers; [malteneuss];
|
||||
maintainers = with maintainers; [ malteneuss ];
|
||||
platforms = platforms.all;
|
||||
};
|
||||
}
|
||||
|
|
|
@ -18,8 +18,11 @@ rustPlatform.buildRustPackage {
|
|||
|
||||
cargoHash = "sha256-9bhKtg2g5H4zGn7yVCjTazeXfeoKjtAKAlzkLkCraiw=";
|
||||
|
||||
nativeBuildInputs = [pkg-config];
|
||||
buildInputs = [openssl systemd];
|
||||
nativeBuildInputs = [ pkg-config ];
|
||||
buildInputs = [
|
||||
openssl
|
||||
systemd
|
||||
];
|
||||
|
||||
meta = with lib; {
|
||||
description = "Nixpkgs pull request channel tracker";
|
||||
|
@ -29,7 +32,7 @@ rustPlatform.buildRustPackage {
|
|||
'';
|
||||
platforms = platforms.linux;
|
||||
license = licenses.agpl3Plus;
|
||||
maintainers = with maintainers; [patrickdag];
|
||||
maintainers = with maintainers; [ patrickdag ];
|
||||
mainProgram = "pr-tracker";
|
||||
};
|
||||
}
|
||||
|
|
|
@ -7,7 +7,12 @@
|
|||
}:
|
||||
writeShellApplication {
|
||||
name = "clone-term";
|
||||
runtimeInputs = [ps procps xdotool jq];
|
||||
runtimeInputs = [
|
||||
ps
|
||||
procps
|
||||
xdotool
|
||||
jq
|
||||
];
|
||||
text = ''
|
||||
|
||||
if [[ ''${XDG_CURRENT_DESKTOP-} == sway ]]; then
|
||||
|
|
|
@@ -1,7 +1,7 @@
_final: prev: {
scripts = {
usbguardw = prev.callPackage ./usbguardw.nix {};
clone-term = prev.callPackage ./clone-term.nix {};
impermanence-o = prev.callPackage ./impermanence-orphan.nix {};
usbguardw = prev.callPackage ./usbguardw.nix { };
clone-term = prev.callPackage ./clone-term.nix { };
impermanence-o = prev.callPackage ./impermanence-orphan.nix { };
};
}
|
||||
|
|
|
@@ -1,5 +1,5 @@
{writers}:
writers.writePython3Bin "find-orphaned" {} ''
{ writers }:
writers.writePython3Bin "find-orphaned" { } ''
import sys
import os
if len(sys.argv) != 2:
|
||||
|
|
|
@@ -1,4 +1,4 @@
{writeShellApplication}:
{ writeShellApplication }:
writeShellApplication {
name = "usguardw";
text = ''
|
||||
|
|
|
@@ -6,7 +6,7 @@
rustPlatform.buildRustPackage rec {
pname = "zsh-histd-skim";
version = "0.8.6";
buildInputs = [sqlite];
buildInputs = [ sqlite ];
src = fetchFromGitHub {
owner = "m42e";
repo = "zsh-histdb-skim";
|
||||
|
|
|
@@ -1,4 +1,5 @@
{pkgs, ...}: {
{ pkgs, ... }:
{
home.packages = [
pkgs.xclip
pkgs.xdragon
|
||||
|
|
|
@@ -79,13 +79,15 @@ MOD: TAGS: pkgs:
"${MOD}-m " = "spawn ${pkgs.thunderbird}/bin/thunderbird";
"Menu" = "spawn rofi -show drun";
}
// builtins.listToAttrs (map (x: {
// builtins.listToAttrs (
map (x: {
name = "${MOD}-${x}";
value = "use_index ${x}";
})
TAGS)
// builtins.listToAttrs (map (x: {
}) TAGS
)
// builtins.listToAttrs (
map (x: {
name = "${MOD}-Shift-${x}";
value = "move_index ${x}";
})
TAGS)
}) TAGS
)
|
||||
|
|