Compare commits: 4129589665...kubernetes
1 commit: a8ca3653b4

.gitattributes (vendored, new file, +2)
@@ -0,0 +1,2 @@
**/wallpapers/*.jpg filter=lfs diff=lfs merge=lfs -text
**/wallpapers/*.png filter=lfs diff=lfs merge=lfs -text

.gitlab-ci.yml (new file, +27)
@@ -0,0 +1,27 @@
stages:
  - build
  - test

variables:
  GIT_SUBMODULE_STRATEGY: recursive

cache: &global_cache
  key:
    files:
      - flake.lock
      - flake.nix
  paths:
    - /nix/store
  policy: pull-push

build:
  image: nixos/nix
  stage: build
  timeout: 48h
  cache:
    <<: *global_cache
  script:
    - nix --experimental-features 'nix-command flakes' flake check --show-trace

include:
  - template: Jobs/Secret-Detection.gitlab-ci.yml
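
Note: the CI's `build` job can be reproduced from a local checkout with the same command its `script` section runs; a minimal sketch:

```bash
# Same evaluation the CI "build" job performs, run locally.
# --show-trace prints the full evaluation trace on failure.
nix --experimental-features 'nix-command flakes' flake check --show-trace
```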

.gitmodules (vendored, -6 +9)
@@ -1,6 +1,9 @@
-[submodule "secrets"]
-	path = submodules/secrets
-	url = git@karaolidis.com:karaolidis/nix-secrets.git
-[submodule "sas"]
-	path = submodules/sas
-	url = git@karaolidis.com:karaolidis/nix-sas.git
+[submodule "submodules/nixpkgs"]
+	path = submodules/nixpkgs
+	url = git@github.com:karaolidis/nixpkgs.git
+	branch = integration
+
+[submodule "submodules/home-manager"]
+	path = submodules/home-manager
+	url = git@github.com:karaolidis/home-manager.git
+	branch = integration
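
Note: the CI clones with `GIT_SUBMODULE_STRATEGY: recursive`, so a local checkout needs the submodules too; a sketch (the clone URL is illustrative):

```bash
# Fresh clone with the nixpkgs/home-manager fork submodules initialized.
git clone --recurse-submodules <repo-url>
# Or, inside an existing checkout:
git submodule update --init --recursive
```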

README.md (18 changed lines)
@@ -16,14 +16,16 @@ NixOS dotfiles and configuration for various hosts and users.
     - [`gui/`](./hosts/common/configs/user/gui): GUI-related settings.
   - `<name>/`: Individual host configurations.
 
 - [`packages/`](./packages/): Custom packages.
   - `secrets/<namespace>/`: Global secrets for individual namespaces that apply across all hosts.
 
 - [`lib/`](./lib): Nix library function definitions and utilities.
   - [`scripts/`](./lib/scripts): Utility scripts for managing the repository.
     - [`add-host.sh`](./lib/scripts/add-host.sh): Instantiate the keys for a new host configuration.
     - [`remove-host.sh`](./lib/scripts/remove-host.sh): Remove references to a host.
     - [`update-keys.sh`](./lib/scripts/update-keys.sh): Update the encryption keys in all relevant files using `sops.yaml` configurations.
-    - [`update.sh`](./lib/scripts/update.sh): Update flake and all packages.
+    - [`update.sh`](./lib/scripts/update.sh): Update flake and all git submodules.
+
+- [`submodules/`](./submodules): Flake forks used in the repository, such as [`nixpkgs`](https://github.com/NixOS/nixpkgs) and [`home-manager`](https://github.com/nix-community/home-manager).
 
 Any `options.nix` files create custom option definitions when present.

@@ -31,10 +33,8 @@ Any `options.nix` files create custom option definitions when present.
 
 Below is a table of all hosts, with links to their respective README files, which may provide further details and/or post-installation checklists.
 
-| Host          | README                                                        |
-| ------------- | ------------------------------------------------------------ |
-| `installer`   | [hosts/installer/README.md](./hosts/installer/README.md)     |
-| `himalia`     | [hosts/himalia/README.md](./hosts/himalia/README.md)         |
-| `elara`       | [hosts/elara/README.md](./hosts/elara/README.md)             |
-| `jupiter`     | [hosts/jupiter/README.md](./hosts/jupiter/README.md)         |
-| `jupiter-vps` | [hosts/jupiter-vps/README.md](./hosts/jupiter-vps/README.md) |
+| Host        | README                                                    |
+|-------------|-----------------------------------------------------------|
+| `installer` | [hosts/installer/README.md](./hosts/installer/README.md) |
+| `eirene`    | [hosts/eirene/README.md](./hosts/eirene/README.md)       |
+| `elara`     | [hosts/elara/README.md](./hosts/elara/README.md)         |

flake.lock (generated, 322 changed lines)
@@ -10,11 +10,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1754932414,
-      "narHash": "sha256-V8c+68Axn5AGDCaG9Zv+EqNU4D6xWPHNXLIapq6AGiM=",
+      "lastModified": 1736090999,
+      "narHash": "sha256-B5CJuHqfJrzPa7tObK0H9669/EClSHpa/P7B9EuvElU=",
       "owner": "aylur",
       "repo": "ags",
-      "rev": "9e6912b51d7bc58f35d10b11be1a126b926b56d3",
+      "rev": "5527c3c07d92c11e04e7fd99d58429493dba7e3c",
       "type": "github"
     },
     "original": {
@@ -30,11 +30,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1754893912,
-      "narHash": "sha256-kzU/3A4k+d3PsgMLohzSh4KJybTqvzqibUVqV2yXCGY=",
+      "lastModified": 1736497508,
+      "narHash": "sha256-murrCQMYKtZ8rkZ5O726ZCsCDee1l3ZdmV8yC9gRaIc=",
       "owner": "aylur",
       "repo": "astal",
-      "rev": "5d4eef66392b0dff99a63a4f39ff886624bd69dd",
+      "rev": "ef4f95608481414053ecdbe4de29bd86fb452813",
       "type": "github"
     },
     "original": {
@@ -43,21 +43,6 @@
       "type": "github"
     }
   },
-  "crane": {
-    "locked": {
-      "lastModified": 1754269165,
-      "narHash": "sha256-0tcS8FHd4QjbCVoxN9jI+PjHgA4vc/IjkUSp+N3zy0U=",
-      "owner": "ipetkov",
-      "repo": "crane",
-      "rev": "444e81206df3f7d92780680e45858e31d2f07a08",
-      "type": "github"
-    },
-    "original": {
-      "owner": "ipetkov",
-      "repo": "crane",
-      "type": "github"
-    }
-  },
   "disko": {
     "inputs": {
       "nixpkgs": [
@@ -65,16 +50,15 @@
       ]
     },
     "locked": {
-      "lastModified": 1746728054,
-      "narHash": "sha256-eDoSOhxGEm2PykZFa/x9QG5eTH0MJdiJ9aR00VAofXE=",
+      "lastModified": 1736437680,
+      "narHash": "sha256-9Sy17XguKdEU9M5peTrkWSlI/O5IAqjHzdzxbXnc30g=",
       "owner": "nix-community",
       "repo": "disko",
-      "rev": "ff442f5d1425feb86344c028298548024f21256d",
+      "rev": "4d5d07d37ff773338e40a92088f45f4f88e509c8",
       "type": "github"
     },
     "original": {
       "owner": "nix-community",
-      "ref": "latest",
       "repo": "disko",
       "type": "github"
     }
@@ -82,11 +66,11 @@
   "flake-compat": {
     "flake": false,
     "locked": {
-      "lastModified": 1747046372,
-      "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
+      "lastModified": 1733328505,
+      "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
       "owner": "edolstra",
       "repo": "flake-compat",
-      "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
+      "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
       "type": "github"
     },
     "original": {
@@ -95,39 +79,19 @@
       "type": "github"
     }
   },
-  "flake-input-patcher": {
-    "inputs": {
-      "nixpkgs": [
-        "nixpkgs"
-      ],
-      "systems": [
-        "systems"
-      ]
-    },
-    "locked": {
-      "lastModified": 1751871600,
-      "narHash": "sha256-I4/2ekJrbRMhOpKfzgnlrN45nQj9YQmZnoSeAaRa1SU=",
-      "owner": "jfly",
-      "repo": "flake-input-patcher",
-      "rev": "4ff068126d49829b106280738944bde91951d59d",
-      "type": "github"
-    },
-    "original": {
-      "owner": "jfly",
-      "repo": "flake-input-patcher",
-      "type": "github"
-    }
-  },
   "flake-parts": {
     "inputs": {
-      "nixpkgs-lib": "nixpkgs-lib"
+      "nixpkgs-lib": [
+        "nur",
+        "nixpkgs"
+      ]
     },
     "locked": {
-      "lastModified": 1754487366,
-      "narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=",
+      "lastModified": 1733312601,
+      "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
       "owner": "hercules-ci",
       "repo": "flake-parts",
-      "rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18",
+      "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
       "type": "github"
     },
     "original": {
@@ -156,28 +120,6 @@
       "type": "github"
     }
   },
-  "gitignore": {
-    "inputs": {
-      "nixpkgs": [
-        "lanzaboote",
-        "pre-commit-hooks-nix",
-        "nixpkgs"
-      ]
-    },
-    "locked": {
-      "lastModified": 1709087332,
-      "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
-      "owner": "hercules-ci",
-      "repo": "gitignore.nix",
-      "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
-      "type": "github"
-    },
-    "original": {
-      "owner": "hercules-ci",
-      "repo": "gitignore.nix",
-      "type": "github"
-    }
-  },
   "home-manager": {
     "inputs": {
       "nixpkgs": [
@@ -185,92 +127,50 @@
       ]
     },
     "locked": {
-      "lastModified": 1754974548,
-      "narHash": "sha256-XMjUjKD/QRPcqUnmSDczSYdw46SilnG0+wkho654DFM=",
-      "owner": "nix-community",
+      "lastModified": 1736504054,
+      "narHash": "sha256-Mb0aIdOIg5ge0Lju1zogdAcfklRciR8G0NY6R423oek=",
+      "owner": "karaolidis",
       "repo": "home-manager",
-      "rev": "27a26be51ff0162a8f67660239f9407dba68d7c5",
+      "rev": "baa0e7a14088ff1ed891afe4c6457faf40aa30a6",
       "type": "github"
     },
     "original": {
-      "owner": "nix-community",
+      "owner": "karaolidis",
+      "ref": "integration",
       "repo": "home-manager",
       "type": "github"
     }
   },
-  "lanzaboote": {
-    "inputs": {
-      "crane": "crane",
-      "flake-compat": "flake-compat",
-      "flake-parts": [
-        "flake-parts"
-      ],
-      "nixpkgs": [
-        "nixpkgs"
-      ],
-      "pre-commit-hooks-nix": "pre-commit-hooks-nix",
-      "rust-overlay": "rust-overlay"
-    },
-    "locked": {
-      "lastModified": 1754297745,
-      "narHash": "sha256-aD6/scLN3L4ZszmNbhhd3JQ9Pzv1ScYFphz14wHinfs=",
-      "owner": "nix-community",
-      "repo": "lanzaboote",
-      "rev": "892cbdca865d6b42f9c0d222fe309f7720259855",
-      "type": "github"
-    },
-    "original": {
-      "owner": "nix-community",
-      "repo": "lanzaboote",
-      "type": "github"
-    }
-  },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1754725699,
-      "narHash": "sha256-iAcj9T/Y+3DBy2J0N+yF9XQQQ8IEb5swLFzs23CdP88=",
-      "owner": "NixOS",
+      "lastModified": 1738150270,
+      "narHash": "sha256-GkH7I9LW0aFklGc3YxjaBW7TtJy5aWHE0rPBUuz35Hk=",
+      "owner": "karaolidis",
       "repo": "nixpkgs",
-      "rev": "85dbfc7aaf52ecb755f87e577ddbe6dbbdbc1054",
+      "rev": "e8e18ef6309d021fa600f5aa2665963d8cf76ab7",
       "type": "github"
     },
     "original": {
-      "owner": "NixOS",
-      "ref": "nixos-unstable",
+      "owner": "karaolidis",
+      "ref": "integration",
       "repo": "nixpkgs",
       "type": "github"
     }
   },
-  "nixpkgs-lib": {
-    "locked": {
-      "lastModified": 1753579242,
-      "narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=",
-      "owner": "nix-community",
-      "repo": "nixpkgs.lib",
-      "rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e",
-      "type": "github"
-    },
-    "original": {
-      "owner": "nix-community",
-      "repo": "nixpkgs.lib",
-      "type": "github"
-    }
-  },
   "nur": {
     "inputs": {
-      "flake-parts": [
-        "flake-parts"
-      ],
+      "flake-parts": "flake-parts",
       "nixpkgs": [
         "nixpkgs"
-      ]
+      ],
+      "treefmt-nix": "treefmt-nix"
     },
     "locked": {
-      "lastModified": 1755067854,
-      "narHash": "sha256-VP+2GVREkB7tg8vGBJ2yOlfwng+TEv45vZGvb4eV17E=",
+      "lastModified": 1736500613,
+      "narHash": "sha256-OCEXlRyOIMzxrhmnzoX32e241A7+Z+zsuyR7i6AG608=",
       "owner": "nix-community",
       "repo": "NUR",
-      "rev": "3352304d8f256bb67b5f9662b3493b069b3cac25",
+      "rev": "d51e847f68700c38f850a62c2b3e728864a38cde",
       "type": "github"
     },
     "original": {
@@ -279,143 +179,18 @@
       "type": "github"
     }
   },
-  "nvidia-patch": {
-    "inputs": {
-      "nixpkgs": [
-        "nixpkgs"
-      ],
-      "utils": [
-        "flake-utils"
-      ]
-    },
-    "locked": {
-      "lastModified": 1755069017,
-      "narHash": "sha256-cTD5WfZRK2mwrSktlYcrk6DOEEkQbE1z78O16TF293c=",
-      "owner": "icewind1991",
-      "repo": "nvidia-patch-nixos",
-      "rev": "d187885c14bdd8520d40f527134d536168f8d92b",
-      "type": "github"
-    },
-    "original": {
-      "owner": "icewind1991",
-      "repo": "nvidia-patch-nixos",
-      "type": "github"
-    }
-  },
-  "pre-commit-hooks-nix": {
-    "inputs": {
-      "flake-compat": [
-        "lanzaboote",
-        "flake-compat"
-      ],
-      "gitignore": "gitignore",
-      "nixpkgs": [
-        "lanzaboote",
-        "nixpkgs"
-      ]
-    },
-    "locked": {
-      "lastModified": 1750779888,
-      "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
-      "owner": "cachix",
-      "repo": "pre-commit-hooks.nix",
-      "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
-      "type": "github"
-    },
-    "original": {
-      "owner": "cachix",
-      "repo": "pre-commit-hooks.nix",
-      "type": "github"
-    }
-  },
-  "quadlet-nix": {
-    "locked": {
-      "lastModified": 1754008153,
-      "narHash": "sha256-MYT1mDtSkiVg343agxgBFsnuNU3xS8vRy399JXX1Vw0=",
-      "owner": "SEIAROTg",
-      "repo": "quadlet-nix",
-      "rev": "1b2d27d460d8c7e4da5ba44ede463b427160b5c4",
-      "type": "github"
-    },
-    "original": {
-      "owner": "SEIAROTg",
-      "repo": "quadlet-nix",
-      "type": "github"
-    }
-  },
   "root": {
     "inputs": {
       "ags": "ags",
       "astal": "astal",
       "disko": "disko",
-      "flake-input-patcher": "flake-input-patcher",
-      "flake-parts": "flake-parts",
       "flake-utils": "flake-utils",
       "home-manager": "home-manager",
-      "lanzaboote": "lanzaboote",
       "nixpkgs": "nixpkgs",
       "nur": "nur",
-      "nvidia-patch": "nvidia-patch",
-      "quadlet-nix": "quadlet-nix",
-      "sas": "sas",
-      "secrets": "secrets",
       "sops-nix": "sops-nix",
       "spicetify-nix": "spicetify-nix",
-      "systems": "systems",
-      "treefmt-nix": "treefmt-nix"
+      "systems": "systems"
     }
   },
-  "rust-overlay": {
-    "inputs": {
-      "nixpkgs": [
-        "lanzaboote",
-        "nixpkgs"
-      ]
-    },
-    "locked": {
-      "lastModified": 1754189623,
-      "narHash": "sha256-fstu5eb30UYwsxow0aQqkzxNxGn80UZjyehQVNVHuBk=",
-      "owner": "oxalica",
-      "repo": "rust-overlay",
-      "rev": "c582ff7f0d8a7ea689ae836dfb1773f1814f472a",
-      "type": "github"
-    },
-    "original": {
-      "owner": "oxalica",
-      "repo": "rust-overlay",
-      "type": "github"
-    }
-  },
-  "sas": {
-    "flake": false,
-    "locked": {
-      "lastModified": 1755341965,
-      "narHash": "sha256-A6d2eaKp/AVr7pw6qY860XZMSSMr9suaoKEEKlpYHXo=",
-      "ref": "refs/heads/main",
-      "rev": "954fc8c375876169d0549548b0fdf905d3ebe06b",
-      "revCount": 6,
-      "type": "git",
-      "url": "ssh://git@karaolidis.com/karaolidis/nix-sas.git"
-    },
-    "original": {
-      "type": "git",
-      "url": "ssh://git@karaolidis.com/karaolidis/nix-sas.git"
-    }
-  },
-  "secrets": {
-    "flake": false,
-    "locked": {
-      "lastModified": 1755243351,
-      "narHash": "sha256-Oa7ASrkHUcNHMf/rXnVokLytKEqiM4X2C7R8gBSy/AM=",
-      "ref": "refs/heads/main",
-      "rev": "13b3145cbabcf1d042abdab931cec9042bccc771",
-      "revCount": 32,
-      "type": "git",
-      "url": "ssh://git@karaolidis.com/karaolidis/nix-secrets.git"
-    },
-    "original": {
-      "type": "git",
-      "url": "ssh://git@karaolidis.com/karaolidis/nix-secrets.git"
-    }
-  },
   "sops-nix": {
@@ -425,11 +200,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1754988908,
-      "narHash": "sha256-t+voe2961vCgrzPFtZxha0/kmFSHFobzF00sT8p9h0U=",
+      "lastModified": 1736203741,
+      "narHash": "sha256-eSjkBwBdQk+TZWFlLbclF2rAh4JxbGg8az4w/Lfe7f4=",
       "owner": "Mic92",
       "repo": "sops-nix",
-      "rev": "3223c7a92724b5d804e9988c6b447a0d09017d48",
+      "rev": "c9c88f08e3ee495e888b8d7c8624a0b2519cb773",
       "type": "github"
     },
     "original": {
@@ -440,6 +215,7 @@
   },
   "spicetify-nix": {
     "inputs": {
+      "flake-compat": "flake-compat",
       "nixpkgs": [
         "nixpkgs"
       ],
@@ -448,11 +224,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1754801101,
-      "narHash": "sha256-oxWjZ/SfhCvHFNePZcUu+LcE5j4xxuIt/yaoaSvMZk0=",
+      "lastModified": 1736482561,
+      "narHash": "sha256-f4hvN4MF26NIYeFA/H1sVW6KU5X9/jy9l95WrMsNUIU=",
       "owner": "Gerg-L",
       "repo": "spicetify-nix",
-      "rev": "fcbfc21572518c68317df992929b28df9a1d8468",
+      "rev": "77fb1ae39e0f5c60a7d0bd6ce078b9c56e3356cb",
       "type": "github"
     },
     "original": {
@@ -472,6 +248,7 @@
   },
   "original": {
     "owner": "nix-systems",
+    "ref": "main",
     "repo": "default",
     "type": "github"
   }
@@ -479,15 +256,16 @@
   "treefmt-nix": {
     "inputs": {
       "nixpkgs": [
+        "nur",
         "nixpkgs"
       ]
     },
     "locked": {
-      "lastModified": 1754847726,
-      "narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
+      "lastModified": 1733222881,
+      "narHash": "sha256-JIPcz1PrpXUCbaccEnrcUS8jjEb/1vJbZz5KkobyFdM=",
       "owner": "numtide",
       "repo": "treefmt-nix",
-      "rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
+      "rev": "49717b5af6f80172275d47a418c9719a31a78b53",
       "type": "github"
     },
     "original": {
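
Note: the lock entries above follow mechanically from the input changes in flake.nix below; regenerating them locally is a standard lock operation, sketched here (second form depends on the Nix version):

```bash
# Re-resolve every input declared in flake.nix and rewrite flake.lock.
nix flake update
# Refresh a single input, e.g. the nixpkgs fork (older Nix:
# `nix flake lock --update-input nixpkgs`).
nix flake update nixpkgs
```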

flake.nix (154 changed lines)
@@ -1,14 +1,38 @@
 {
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
+    nixpkgs = {
+      # --- Official
+      # type = "github";
+      # owner = "NixOS";
+      # repo = "nixpkgs";
+      # ref = "master";
+      # --- Fork
+      type = "github";
+      owner = "karaolidis";
+      repo = "nixpkgs";
+      ref = "integration";
+      # --- Local
+      # url = "git+file:./submodules/nixpkgs";
+    };
 
     home-manager = {
-      url = "github:nix-community/home-manager";
+      # --- Official
+      # type = "github";
+      # owner = "nix-community"
+      # repo = "home-manager";
+      # --- Fork
+      type = "github";
+      owner = "karaolidis";
+      repo = "home-manager";
+      ref = "integration";
+      # --- Local
+      # url = "git+file:./submodules/home-manager";
+
       inputs.nixpkgs.follows = "nixpkgs";
     };
 
     disko = {
-      url = "github:nix-community/disko/latest";
+      url = "github:nix-community/disko";
       inputs.nixpkgs.follows = "nixpkgs";
     };
 
@@ -17,55 +41,21 @@
       inputs.nixpkgs.follows = "nixpkgs";
     };
 
-    lanzaboote = {
-      url = "github:nix-community/lanzaboote";
-      inputs = {
-        nixpkgs.follows = "nixpkgs";
-        flake-parts.follows = "flake-parts";
-      };
-    };
-
-    # FIXME: https://github.com/NixOS/nix/issues/12281
-    secrets = {
-      url = "git+ssh://git@karaolidis.com/karaolidis/nix-secrets.git";
-      flake = false;
-    };
-
-    # FIXME: https://github.com/NixOS/nix/issues/12281
-    sas = {
-      url = "git+ssh://git@karaolidis.com/karaolidis/nix-sas.git";
-      flake = false;
+    systems = {
+      type = "github";
+      owner = "nix-systems";
+      repo = "default";
+      ref = "main";
     };
 
     nur = {
       url = "github:nix-community/NUR";
       inputs = {
         nixpkgs.follows = "nixpkgs";
-        flake-parts.follows = "flake-parts";
       };
     };
 
-    treefmt-nix = {
-      url = "github:numtide/treefmt-nix";
-      inputs.nixpkgs.follows = "nixpkgs";
-    };
-
-    flake-input-patcher = {
-      url = "github:jfly/flake-input-patcher";
-      inputs = {
-        nixpkgs.follows = "nixpkgs";
-        systems.follows = "systems";
-      };
-    };
-
-    quadlet-nix.url = "github:SEIAROTg/quadlet-nix";
-
-    nvidia-patch = {
-      url = "github:icewind1991/nvidia-patch-nixos";
-      inputs = {
-        nixpkgs.follows = "nixpkgs";
-        utils.follows = "flake-utils";
-      };
+    flake-utils = {
+      url = "github:numtide/flake-utils";
+      inputs.systems.follows = "systems";
     };
 
     astal = {
@@ -88,61 +78,49 @@
         systems.follows = "systems";
       };
     };
 
-    systems.url = "github:nix-systems/default";
-
-    flake-parts.url = "github:hercules-ci/flake-parts";
-
-    flake-utils = {
-      url = "github:numtide/flake-utils";
-      inputs.systems.follows = "systems";
-    };
   };
 
   outputs =
-    inputs:
-    let
-      mkInputs =
-        system:
-        let
-          patcher = inputs.flake-input-patcher.lib.${system};
-        in
-        patcher.patch inputs (import ./patches.nix { inherit patcher; });
-
-      mkNixosConfiguration =
-        inputs: system: modules:
-        inputs.nixpkgs.lib.nixosSystem {
-          inherit system modules;
+    { self, nixpkgs, ... }@inputs:
+    {
+      nixosConfigurations = {
+        installer = nixpkgs.lib.nixosSystem rec {
+          system = "x86_64-linux";
+          modules = [ ./hosts/installer ];
+          specialArgs = { inherit inputs system; };
         };
-    in
-    (
-      let
-        system = "x86_64-linux";
-        inputs = mkInputs system;
-
-        pkgs = import inputs.nixpkgs {
-          inherit system;
-          config.allowUnfree = true;
+
+        eirene = nixpkgs.lib.nixosSystem rec {
+          system = "x86_64-linux";
+          modules = [ ./hosts/eirene ];
+          specialArgs = { inherit inputs system; };
         };
 
-        treefmt = inputs.treefmt-nix.lib.evalModule pkgs ./treefmt.nix;
+        elara = nixpkgs.lib.nixosSystem rec {
+          system = "x86_64-linux";
+          modules = [ ./hosts/elara ];
+          specialArgs = { inherit inputs system; };
+        };
+      };
+    }
+    // inputs.flake-utils.lib.eachDefaultSystem (
+      system:
+      let
+        pkgs = nixpkgs.legacyPackages.${system};
       in
       {
-        nixosConfigurations = {
-          installer = mkNixosConfiguration inputs system [ ./hosts/installer ];
-          himalia = mkNixosConfiguration inputs system [ ./hosts/himalia ];
-          elara = mkNixosConfiguration inputs system [ ./hosts/elara ];
-          jupiter = mkNixosConfiguration inputs system [ ./hosts/jupiter ];
-          jupiter-vps = mkNixosConfiguration inputs system [ ./hosts/jupiter-vps ];
+        devShells = {
+          bun = import ./hosts/common/shells/bun { inherit pkgs; };
+          c = import ./hosts/common/shells/c { inherit pkgs; };
+          go = import ./hosts/common/shells/go { inherit pkgs; };
+          java = import ./hosts/common/shells/java { inherit pkgs; };
+          nix = import ./hosts/common/shells/nix { inherit pkgs; };
+          nodejs = import ./hosts/common/shells/nodejs { inherit pkgs; };
+          python = import ./hosts/common/shells/python { inherit pkgs; };
         };
 
-        devShells.${system} = import ./hosts/common/shells { inherit pkgs; };
-        lib.${system} = import ./lib { inherit pkgs; };
-        packages.${system} = import ./packages { inherit pkgs inputs system; };
-
-        formatter.${system} = treefmt.config.build.wrapper;
-        checks.formatting.${system} = treefmt.config.build.check inputs.self;
+        formatter = pkgs.nixfmt-rfc-style;
       }
     );
 }
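
Note: with the outputs above, typical usage is a flake-based rebuild or entering one of the per-language shells; a sketch (host and shell names taken from the outputs above):

```bash
# Build and activate one of the nixosConfigurations.
sudo nixos-rebuild switch --flake .#eirene
# Enter one of the devShells defined for the current system.
nix develop .#python
```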

hosts/.gitignore (vendored, 4 changed lines)
@@ -1,2 +1,2 @@
-**/secrets/ssh_host_ed25519_key
-**/secrets/.decrypted~*
+*/secrets/ssh_host_ed25519_key
+*/secrets/.decrypted~*

hosts/common/configs/system/backup/backup.completion.zsh (new file, +16)
@@ -0,0 +1,16 @@
_backup_completion() {
  local options=(
    '-m[Partition to mount for backup]:partition:($(_partitions))'
    '-b[Backup directory]:backup directory:_files -/'
  )
  local curcontext="$curcontext" state line
  typeset -A opt_args

  _partitions() {
    lsblk -rno NAME | sed 's/^/\/dev\//'
  }

  _arguments -s $options
}

compdef _backup_completion backup

hosts/common/configs/system/backup/backup.sh (new file, +64)
@@ -0,0 +1,64 @@
if [[ "$EUID" -ne 0 ]]; then
  echo "Please run the script as root."
  exit 1
fi

usage() {
  echo "Usage: $0 [-m partition] [-b backup_location]"
  exit 1
}

cleanup() {
  if [ -d "/persist.bak" ]; then btrfs -q subvolume delete "/persist.bak"; fi
  if [ -n "$backup_location" ]; then rm -f "$backup_location.tmp"; fi

  if [ -n "$mount_location" ]; then
    if mount | grep -q "$mount_location"; then umount "$mount_location"; fi
    if [ -d "$mount_location" ]; then rmdir "$mount_location"; fi
  fi
}

partition=""
backup_location=""
mount_location=""

trap cleanup EXIT

while getopts "m:b:" opt; do
  case "$opt" in
    m) partition="$OPTARG" ;;
    b) backup_location="$OPTARG" ;;
    *) usage ;;
  esac
done

if [ -n "$partition" ]; then
  mount_location=$(mktemp -d /mnt/backup.XXXXXX)
  echo "Mounting $partition at $mount_location..."
  mount "$partition" "$mount_location"
fi

if [ -z "$mount_location" ]; then
  if [[ "$backup_location" != /* ]]; then
    backup_location="$(realpath "$backup_location")"
  fi
else
  if [[ "$backup_location" = /* ]]; then
    echo "Error: When a partition is mounted, backup_location must be relative."
    exit 1
  fi

  backup_location="$(realpath "$mount_location/$backup_location")"
fi

backup_location="$backup_location/$(hostname)-$(date +%Y-%m-%d-%H-%M-%S).btrfs.gz"

echo "Creating /persist snapshot..."
btrfs -q subvolume snapshot -r "/persist" "/persist.bak"

echo "Creating backup at $backup_location..."
btrfs -q send "/persist.bak" | gzip > "$backup_location.tmp"

mv "$backup_location.tmp" "$backup_location"

echo "Backup completed successfully!"
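
Note: the backup is a gzipped `btrfs send` stream, so the matching restore path (not part of this diff) is a `btrfs receive`; a hedged sketch with an illustrative file name:

```bash
# Replay a backup into a mounted btrfs filesystem; the snapshot appears
# as a read-only subvolume named persist.bak under /mnt/restore.
gunzip -c /mnt/backup/myhost-2025-01-01-00-00-00.btrfs.gz | btrfs receive /mnt/restore
```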

hosts/common/configs/system/backup/default.nix (new file, +20)
@@ -0,0 +1,20 @@
{ pkgs, ... }:
{
  environment.systemPackages = [
    (pkgs.writeShellApplication {
      name = "backup";
      runtimeInputs = with pkgs; [
        btrfs-progs
        coreutils-full
        util-linux
      ];
      text = builtins.readFile ./backup.sh;
    })
  ];

  home-manager.sharedModules = [
    {
      programs.zsh.initExtra = builtins.readFile ./backup.completion.zsh;
    }
  ];
}
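
Note: `writeShellApplication` wraps backup.sh with the listed runtime inputs on PATH, so usage matches the script's `usage()` line; for example (device name is illustrative):

```bash
# Mount /dev/sdb1 and write the snapshot under a relative "backups"
# directory on that partition.
sudo backup -m /dev/sdb1 -b backups
```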

@@ -8,11 +8,15 @@
     };
   };
 
-  environment.persistence."/persist/state"."/var/lib/bluetooth" = { };
+  environment.persistence."/persist"."/var/lib/bluetooth" = { };
 
   systemd.services.bluetooth.after = [
-    config.environment.persistence."/persist/state"."/var/lib/bluetooth".mount
+    config.environment.persistence."/persist"."/var/lib/bluetooth".mount
   ];
 
-  home-manager.sharedModules = [ { services.mpris-proxy.enable = config.services.pipewire.enable; } ];
+  home-manager.sharedModules = [
+    {
+      services.mpris-proxy.enable = config.services.pipewire.enable;
+    }
+  ];
 }

@@ -10,8 +10,11 @@
       timeout = 1;
       efi.canTouchEfiVariables = true;
     };
 
+    initrd.systemd.enable = true;
     kernelPackages = pkgs.linuxPackages_latest;
     supportedFilesystems = [
+      "btrfs"
       "ntfs"
     ];
   };
 }

@@ -1,33 +0,0 @@
-{ ... }:
-{
-  systemd.tmpfiles.rules = [
-    "d /persist/user.bak 0755 root root"
-    "d /persist/state.bak 0755 root root"
-  ];
-
-  services.btrbk = {
-    ioSchedulingClass = "idle";
-    niceness = 19;
-    instances = {
-      persist-user = {
-        onCalendar = "hourly";
-        settings.volume."/persist" = {
-          subvolume = "user";
-          snapshot_dir = "user.bak";
-          snapshot_preserve_min = "latest";
-          snapshot_preserve = "48h 14d 4w 6m";
-        };
-      };
-
-      persist-state = {
-        onCalendar = "daily";
-        settings.volume."/persist" = {
-          subvolume = "state";
-          snapshot_dir = "state.bak";
-          snapshot_preserve_min = "latest";
-          snapshot_preserve = "7d 4w 3m";
-        };
-      };
-    };
-  };
-}

@@ -1,14 +1,7 @@
-{ pkgs, ... }:
+{ ... }:
 {
-  boot = {
-    initrd.supportedFilesystems = [ "btrfs" ];
-    supportedFilesystems = [ "btrfs" ];
-  };
-
   services.btrfs.autoScrub = {
     enable = true;
     interval = "weekly";
   };
-
-  environment.systemPackages = with pkgs; [ compsize ];
 }

hosts/common/configs/system/cpu/default.nix (new file, +4)
@@ -0,0 +1,4 @@
{ ... }:
{
  imports = [ ./options.nix ];
}

@@ -1,7 +0,0 @@
-{ ... }:
-{
-  imports = [
-    ./cpu/options.nix
-    ./impermanence/options.nix
-  ];
-}

@@ -1,22 +0,0 @@
-{ lib, pkgs, ... }:
-{
-  networking.networkmanager.dns = "dnsmasq";
-
-  environment.etc."NetworkManager/dnsmasq.d/10-bind-interfaces.conf".source =
-    (pkgs.formats.keyValue {
-      mkKeyValue =
-        name: value:
-        if value == true then
-          name
-        else if value == false then
-          ""
-        else
-          lib.generators.mkKeyValueDefault { } "=" name value;
-      listsAsDuplicateKeys = true;
-    }).generate
-      "10-bind-interfaces.conf"
-      {
-        bind-interfaces = true;
-        listen-address = [ "127.0.0.1" ];
-      };
-}

hosts/common/configs/system/docker/default.nix (new file, +29)
@@ -0,0 +1,29 @@
{ config, pkgs, ... }:
{
  virtualisation.docker = {
    enable = true;
    enableOnBoot = false;
    storageDriver = "btrfs";

    daemon.settings = {
      experimental = true;
      ipv6 = true;
      fixed-cidr-v6 = "fd00::/80";
    };

    autoPrune = {
      enable = true;
      flags = [ "--all" ];
    };
  };

  environment = {
    persistence."/persist"."/var/lib/docker" = { };
    systemPackages = with pkgs; [ docker-compose ];
  };

  systemd = {
    services.docker.after = [ config.environment.persistence."/persist"."/var/lib/docker".mount ];
    sockets.docker.after = [ config.environment.persistence."/persist"."/var/lib/docker".mount ];
  };
}
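
Note: with `enableOnBoot = false` the daemon is not started at boot (the socket can still start it on first use); a quick way to confirm the btrfs storage driver took effect:

```bash
# Start the daemon on demand, then query the active storage driver.
sudo systemctl start docker
docker info --format '{{.Driver}}'   # expected: btrfs
```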

@@ -1,10 +1,5 @@
-{ pkgs, ... }:
+{ ... }:
 {
-  environment.systemPackages = with pkgs; [
-    man-pages
-    man-pages-posix
-  ];
-
   documentation = {
     enable = true;

@@ -1,14 +0,0 @@
-{ ... }:
-{
-  environment.persistence."/persist/state"."/var/lib/fail2ban" = { };
-
-  services.fail2ban = {
-    enable = true;
-    bantime = "24h";
-    bantime-increment = {
-      enable = true;
-      maxtime = "720h";
-      overalljails = true;
-    };
-  };
-}

@@ -1,16 +1,48 @@
 { config, pkgs, ... }:
 {
+  imports = [ ./options.nix ];
+
+  boot.initrd.systemd = {
+    enable = true;
+
+    initrdBin = with pkgs; [
+      coreutils
+      util-linux
+      findutils
+      btrfs-progs
+    ];
+
+    services.impermanence = {
+      description = "Rollback BTRFS subvolumes to a pristine state";
+      wantedBy = [ "initrd.target" ];
+      before = [ "sysroot.mount" ];
+      after = [
+        "cryptsetup.target"
+        "local-fs-pre.target"
+      ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
+      environment.DEVICE = config.environment.impermanence.device;
+      script = builtins.readFile ./scripts/wipe.sh;
+    };
+  };
+
   # uuidgen -r | tr -d -
   # https://github.com/NixOS/nixpkgs/blob/master/nixos/doc/manual/administration/systemd-state.section.md
   # https://github.com/NixOS/nixpkgs/pull/286140/files
   # https://git.eisfunke.com/config/nixos/-/blob/e65e1dc21d06d07b454005762b177ef151f8bfb6/nixos/machine-id.nix
-  sops.secrets.machineId.mode = "0444";
+  sops.secrets."machineId".mode = "0444";
 
   environment = {
-    impermanence.enable = true;
-
-    etc.machine-id.source = pkgs.runCommandLocal "machine-id-link" { } ''
-      ln -s ${config.sops.secrets.machineId.path} $out
+    etc."machine-id".source = pkgs.runCommandLocal "machine-id-link" { } ''
+      ln -s ${config.sops.secrets."machineId".path} $out
     '';
+
+    persistence."/persist" = {
+      "/etc/nixos" = { };
+      "/var/lib/nixos" = { };
+      "/var/lib/systemd" = { };
+      "/var/log" = { };
+    };
   };
 }

@@ -1,7 +1,6 @@
 {
   config,
   lib,
-  pkgs,
   utils,
   ...
 }:

@@ -48,16 +47,12 @@ in
   with lib;
   with types;
   {
-    impermanence = {
-      enable = mkEnableOption "Impermanence";
-
-      device = mkOption {
-        type = str;
-        default = config.disko.devices.disk.main.content.partitions.root.content.content.device;
-        description = ''
-          LUKS BTRFS partition to wipe on boot.
-        '';
-      };
+    impermanence.device = mkOption {
+      type = str;
+      default = config.disko.devices.disk.main.content.partitions.root.content.name;
+      description = ''
+        LUKS BTRFS partition to wipe on boot.
+      '';
     };
 
     persistence =

@@ -121,19 +116,6 @@ in
             type = str;
             readOnly = true;
           };
-
-          create = mkOption {
-            type = enum [
-              "none"
-              "file"
-              "directory"
-            ];
-            default = "none";
-            description = ''
-              Whether to create the file or directory
-              in persistence if it does not exist.
-            '';
-          };
         };
       }
     )

@@ -197,31 +179,8 @@
     let
       all = lib.lists.flatten (builtins.concatMap builtins.attrValues (builtins.attrValues cfg));
     in
-    lib.mkIf config.environment.impermanence.enable {
-      boot.initrd.systemd = {
-        enable = true;
-
-        initrdBin = with pkgs; [
-          coreutils
-          util-linux
-          findutils
-          btrfs-progs
-        ];
-
-        services.impermanence = {
-          description = "Rollback BTRFS subvolumes to a pristine state";
-          wantedBy = [ "initrd.target" ];
-          before = [ "sysroot.mount" ];
-          after = [
-            "cryptsetup.target"
-            "local-fs-pre.target"
-          ];
-          unitConfig.DefaultDependencies = false;
-          serviceConfig.Type = "oneshot";
-          environment.DEVICE = config.environment.impermanence.device;
-          script = builtins.readFile ./scripts/wipe.sh;
-        };
-      };
+    {
+      fileSystems = builtins.mapAttrs (_: _: { neededForBoot = true; }) cfg;
 
       systemd = {
         mounts = builtins.map (c: {

@@ -233,11 +192,11 @@ in
         unitConfig.ConditionPathExists = [ (lib.strings.escape [ " " ] c.source) ];
         what = c.source;
         where = c.target;
-        options = lib.strings.concatStringsSep "," [
+        options = lib.strings.concatStringsSep "," ([
           "bind"
           "X-fstrim.notrim"
           "x-gvfs-hide"
-        ];
+        ]);
       }) all;
 
       services = builtins.listToAttrs (

@@ -270,7 +229,6 @@
             source=${lib.strings.escapeShellArg c._sourceRoot}
             target=${lib.strings.escapeShellArg c._targetRoot}
             path=${lib.strings.escapeShellArg c.path}
-            create=${lib.strings.escapeShellArg c.create}
 
             ${builtins.readFile ./scripts/start.sh}
           '';

@@ -278,7 +236,6 @@
             source=${lib.strings.escapeShellArg c._sourceRoot}
             target=${lib.strings.escapeShellArg c._targetRoot}
             path=${lib.strings.escapeShellArg c.path}
-            create=${lib.strings.escapeShellArg c.create}
 
             ${builtins.readFile ./scripts/stop.sh}
           '';

@@ -287,19 +244,6 @@
       );
     };
-
-    fileSystems = builtins.mapAttrs (_: _: { neededForBoot = true; }) cfg // {
-      "/persist".neededForBoot = true;
-    };
-
-    environment.persistence = {
-      "/persist/user"."/etc/nixos" = { };
-      "/persist/state" = {
-        "/var/lib/nixos" = { };
-        "/var/lib/systemd" = { };
-        "/var/log" = { };
-      };
-    };
 
     assertions =
       let
        paths = builtins.map (c: c.path) all;

@@ -1,49 +1,19 @@
 # shellcheck shell=bash
 
 # shellcheck disable=SC2154
-echo "Starting impermanence mount with source: $source, target: $target, path: $path, create: $create"
+echo "Starting impermanence mount with source: $source, target: $target, path: $path."
 
 source_current="$source"
 target_current="$target"
 
-IFS='/' read -ra parts <<< "$path"
-leaf="${parts[-1]}"
+IFS='/' read -ra path_parts <<< "$path"
+unset "path_parts[-1]"
 
-for part in "${parts[@]}"; do
-  source_current+="/$part"
-  target_current+="/$part"
+for part in "${path_parts[@]}"; do
+  source_current="$source_current/$part"
+  target_current="$target_current/$part"
 
-  if [[ -e "$source_current" ]]; then
-    read -r mode owner group <<< "$(stat -c '%a %u %g' "$source_current")"
-
-    if [[ -d "$source_current" ]]; then
-      install -d -m "$mode" -o "$owner" -g "$group" "$target_current"
-      continue
-    fi
-
-    if [[ "$part" != "$leaf" ]]; then
-      echo "Error: $source_current is not a directory, persistence for $path can not be applied."
-      exit 1
-    fi
-
-    install -m "$mode" -o "$owner" -g "$group" /dev/null "$target_current"
-  fi
-
-  if [[ "$create" == "none" ]]; then
-    if [[ ! -d "$source_current" ]]; then
-      break
-    fi
-
-    if [[ -e "$target_current" ]]; then
-      template="$target_current"
-    else
-      template="${source_current%/*}"
-    fi
-
-    read -r mode owner group <<< "$(stat -c '%a %u %g' "$template")"
-
-    if [[ "$part" == "$leaf" && "$create" == "file" ]]; then
-      install -m "$mode" -o "$owner" -g "$group" /dev/null "$source_current"
-    else
-      install -d -m "$mode" -o "$owner" -g "$group" "$source_current"
-    fi
-  fi
+  read -r mode owner group <<< "$(stat -c '%a %u %g' "$source_current")"
+  install -d -m "$mode" -o "$owner" -g "$group" "$target_current"
 done

@@ -1,7 +1,4 @@
 # shellcheck shell=bash
 
 # shellcheck disable=SC2154
-echo "Stopping impermanence mount with source: $source, target: $target, path: $path, create: $create"
+echo "Stopping impermanence mount with source: $source, target: $target, path: $path."
 
 source_current="$source"
 target_current="$target"

@@ -1,5 +1,3 @@
-# shellcheck shell=bash
-
 delete_subvolume_recursively() {
   IFS=$'\n'
   for i in $(btrfs subvolume list -o "$1" | cut -f 9- -d ' '); do

@@ -8,27 +6,21 @@ delete_subvolume_recursively() {
   btrfs subvolume delete "$1"
 }
 
-if [[ -z "$DEVICE" ]]; then
-  echo "Error: DEVICE variable is not set."
-  exit 1
-fi
-
 mkdir -p /mnt/btrfs
-mount "$DEVICE" /mnt/btrfs
+mount "/dev/mapper/$DEVICE" /mnt/btrfs
 
 if [[ -e /mnt/btrfs/@ ]]; then
   mkdir -p /mnt/btrfs/@.bak
-  timestamp=$(date --date="@$(stat -c %Y /mnt/btrfs/@)" "+%Y%m%dT%H%M")
-  base="@.$timestamp"
-
-  target="/mnt/btrfs/@.bak/$base"
-  if [[ -e "$target" ]]; then
-    i=1
-    while [[ -e "/mnt/btrfs/@.bak/${base}_$i" ]]; do
-      (( i++ ))
-    done
-    target="/mnt/btrfs/@.bak/${base}_$i"
-  fi
-
-  mv /mnt/btrfs/@ "$target"
+  timestamp=$(date --date="@$(stat -c %Y /mnt/btrfs/@)" "+%Y-%m-%d_%H:%M:%S")
+  mv /mnt/btrfs/@ "/mnt/btrfs/@.bak/$timestamp"
 fi
 
-find /mnt/btrfs/@.bak/ -maxdepth 1 -mtime +7 | while IFS= read -r i; do
+find /mnt/btrfs/@.bak/ -maxdepth 1 -mtime +14 | while IFS= read -r i; do
   delete_subvolume_recursively "$i"
 done
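
Note: the wipe flow above (new side) moves the previous root subvolume `@` into `@.bak/<timestamp>` and prunes entries older than 14 days; a hedged sketch for inspecting that backlog from a running system (the mapper device name is illustrative):

```bash
# Mount the raw btrfs volume and list the preserved root snapshots.
mkdir -p /mnt/btrfs
mount /dev/mapper/root /mnt/btrfs
btrfs subvolume list /mnt/btrfs | grep '@.bak'
```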

hosts/common/configs/system/kubernetes/default.nix (new file, +52)
@@ -0,0 +1,52 @@
{
  config,
  pkgs,
  ...
}:
{
  imports = [
    ./options
    ./secrets
  ];

  environment = {
    persistence."/persist" = {
      "/var/lib/containerd" = { };
      "/var/lib/kubernetes" = { };
      "/var/lib/kubelet" = { };
      "/var/lib/etcd" = { };
    };

    etc."kubeconfig".source = config.services.kubernetes.kubeconfigs.admin;
    systemPackages = with pkgs; [ kubectl ];
  };

  services = {
    kubernetes = {
      enable = true;

      roles = [
        "master"
        "node"
      ];
    };
  };

  systemd.services = {
    kube-addon-manager.after = [
      config.environment.persistence."/persist"."/var/lib/kubernetes".mount
    ];

    kubelet.after = [
      config.environment.persistence."/persist"."/var/lib/kubelet".mount
    ];

    kube-apiserver.after = [
      config.environment.persistence."/persist"."/var/lib/kubernetes".mount
    ];

    etcd.after = [
      config.environment.persistence."/persist"."/var/lib/etcd".mount
    ];
  };
}
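
Note: since the module links the admin kubeconfig at `/etc/kubeconfig`, a post-boot smoke test looks like this:

```bash
# Point kubectl (installed via systemPackages) at the admin kubeconfig.
export KUBECONFIG=/etc/kubeconfig
kubectl get nodes
kubectl get pods -n kube-system
```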

@@ -0,0 +1,70 @@
{ ... }:
[
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "Role";
    metadata = {
      name = "system:kube-addon-manager";
      namespace = "kube-system";
    };
    rules = [
      {
        apiGroups = [ "*" ];
        resources = [ "*" ];
        verbs = [ "*" ];
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "RoleBinding";
    metadata = {
      name = "system:kube-addon-manager";
      namespace = "kube-system";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "Role";
      name = "system:kube-addon-manager";
    };
    subjects = [
      {
        apiGroup = "rbac.authorization.k8s.io";
        kind = "User";
        name = "system:kube-addon-manager";
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRole";
    metadata = {
      name = "system:kube-addon-manager:cluster-lister";
    };
    rules = [
      {
        apiGroups = [ "*" ];
        resources = [ "*" ];
        verbs = [ "list" ];
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "system:kube-addon-manager:cluster-lister";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:kube-addon-manager:cluster-lister";
    };
    subjects = [
      {
        kind = "User";
        name = "system:kube-addon-manager";
      }
    ];
  }
]

@@ -0,0 +1,206 @@
{ config, ... }:
[
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "create-csrs-for-bootstrapping";
    };
    subjects = [
      {
        kind = "Group";
        name = "system:bootstrappers";
        apiGroup = "rbac.authorization.k8s.io";
      }
    ];
    roleRef = {
      kind = "ClusterRole";
      name = "system:node-bootstrapper";
      apiGroup = "rbac.authorization.k8s.io";
    };
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "auto-approve-csrs-for-group";
    };
    subjects = [
      {
        kind = "Group";
        name = "system:bootstrappers";
        apiGroup = "rbac.authorization.k8s.io";
      }
    ];
    roleRef = {
      kind = "ClusterRole";
      name = "system:certificates.k8s.io:certificatesigningrequests:nodeclient";
      apiGroup = "rbac.authorization.k8s.io";
    };
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "auto-approve-renewals-for-nodes";
    };
    subjects = [
      {
        kind = "Group";
        name = "system:nodes";
        apiGroup = "rbac.authorization.k8s.io";
      }
    ];
    roleRef = {
      kind = "ClusterRole";
      name = "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient";
      apiGroup = "rbac.authorization.k8s.io";
    };
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRole";
    metadata = {
      name = "kubelet-csr-approver";
    };
    rules = [
      {
        apiGroups = [ "certificates.k8s.io" ];
        resources = [ "certificatesigningrequests" ];
        verbs = [
          "get"
          "list"
          "watch"
        ];
      }
      {
        apiGroups = [ "coordination.k8s.io" ];
        resources = [ "leases" ];
        verbs = [
          "create"
          "get"
          "update"
        ];
      }
      {
        apiGroups = [ "certificates.k8s.io" ];
        resources = [ "certificatesigningrequests/approval" ];
        verbs = [ "update" ];
      }
      {
        apiGroups = [ "certificates.k8s.io" ];
        resourceNames = [ "kubernetes.io/kubelet-serving" ];
        resources = [ "signers" ];
        verbs = [ "approve" ];
      }
      {
        apiGroups = [ "" ];
        resources = [ "events" ];
        verbs = [ "create" ];
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "kubelet-csr-approver";
      namespace = "kube-system";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "kubelet-csr-approver";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "kubelet-csr-approver";
        namespace = "kube-system";
      }
    ];
  }
  {
    apiVersion = "v1";
    kind = "ServiceAccount";
    metadata = {
      name = "kubelet-csr-approver";
      namespace = "kube-system";
    };
  }
  {
    apiVersion = "apps/v1";
    kind = "Deployment";
    metadata = {
      name = "kubelet-csr-approver";
      namespace = "kube-system";
    };
    spec = {
      replicas = 1;
      selector = {
        matchLabels = {
          app = "kubelet-csr-approver";
        };
      };
      template = {
        metadata = {
          labels = {
            app = "kubelet-csr-approver";
          };
        };
        spec = {
          serviceAccountName = "kubelet-csr-approver";
          containers = [
            {
              name = "kubelet-csr-approver";
              image = "postfinance/kubelet-csr-approver:latest";
              args = [
                "-metrics-bind-address"
                ":8080"
                "-health-probe-bind-address"
                ":8081"
              ];
              livenessProbe = {
                httpGet = {
                  path = "/healthz";
                  port = 8081;
                };
              };
              resources = {
                requests = {
                  cpu = "100m";
                  memory = "200Mi";
                };
              };
              env = [
                {
                  name = "PROVIDER_REGEX";
                  value = "^${config.networking.fqdnOrHostName}$";
                }
                {
                  name = "PROVIDER_IP_PREFIXES";
                  value = "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.0/8,169.254.0.0/16,::1/128,fe80::/10,fc00::/7";
                }
                {
                  name = "MAX_EXPIRATION_SEC";
                  value = "31622400";
                }
                {
                  name = "BYPASS_DNS_RESOLUTION";
                  value = "true";
                }
              ];
            }
          ];
          tolerations = [
            {
              effect = "NoSchedule";
              key = "node-role.kubernetes.io/control-plane";
              operator = "Equal";
            }
          ];
        };
      };
    };
  }
]
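
Note: the approver exists so kubelet serving certificates get signed automatically; a sketch for verifying it (the CSR name is illustrative):

```bash
# Serving CSRs should reach Approved,Issued without manual action.
kubectl get csr
# Manual fallback for a request the approver declined:
kubectl certificate approve csr-abc12
```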

@@ -0,0 +1,21 @@
{ ... }:
[
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "system:kube-apiserver:kubelet-api-admin";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:kubelet-api-admin";
    };
    subjects = [
      {
        kind = "User";
        name = "system:kube-apiserver";
      }
    ];
  }
]

@@ -0,0 +1,289 @@
{ ... }:
[
  {
    apiVersion = "v1";
    kind = "ServiceAccount";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "metrics-server";
      namespace = "kube-system";
    };
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRole";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
        "rbac.authorization.k8s.io/aggregate-to-admin" = "true";
        "rbac.authorization.k8s.io/aggregate-to-edit" = "true";
        "rbac.authorization.k8s.io/aggregate-to-view" = "true";
      };
      name = "system:aggregated-metrics-reader";
    };
    rules = [
      {
        apiGroups = [ "metrics.k8s.io" ];
        resources = [
          "pods"
          "nodes"
        ];
        verbs = [
          "get"
          "list"
          "watch"
        ];
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRole";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "system:metrics-server";
    };
    rules = [
      {
        apiGroups = [ "" ];
        resources = [ "nodes/metrics" ];
        verbs = [ "get" ];
      }
      {
        apiGroups = [ "" ];
        resources = [
          "pods"
          "nodes"
        ];
        verbs = [
          "get"
          "list"
          "watch"
        ];
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "RoleBinding";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "metrics-server-auth-reader";
      namespace = "kube-system";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "Role";
      name = "extension-apiserver-authentication-reader";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "metrics-server";
        namespace = "kube-system";
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "metrics-server:system:auth-delegator";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:auth-delegator";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "metrics-server";
        namespace = "kube-system";
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "system:metrics-server";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:metrics-server";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "metrics-server";
        namespace = "kube-system";
      }
    ];
  }
  {
    apiVersion = "v1";
    kind = "Service";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "metrics-server";
      namespace = "kube-system";
    };
    spec = {
      ports = [
        {
          name = "https";
          port = 443;
          protocol = "TCP";
          targetPort = "https";
        }
      ];
      selector = {
        k8s-app = "metrics-server";
      };
    };
  }
  {
    apiVersion = "apps/v1";
    kind = "Deployment";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "metrics-server";
      namespace = "kube-system";
    };
    spec = {
      selector = {
        matchLabels = {
          k8s-app = "metrics-server";
        };
      };
      strategy = {
        rollingUpdate = {
          maxUnavailable = 0;
        };
      };
      template = {
        metadata = {
          labels = {
            k8s-app = "metrics-server";
          };
        };
        spec = {
          containers = [
            {
              args = [
                "--cert-dir=/tmp"
                "--secure-port=10250"
                "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"
                "--kubelet-use-node-status-port"
                "--metric-resolution=15s"
              ];
              image = "registry.k8s.io/metrics-server/metrics-server:v0.7.2";
              imagePullPolicy = "IfNotPresent";
              livenessProbe = {
                failureThreshold = 3;
                httpGet = {
                  path = "/livez";
                  port = "https";
                  scheme = "HTTPS";
                };
                periodSeconds = 10;
              };
              name = "metrics-server";
              ports = [
                {
                  containerPort = 10250;
                  name = "https";
                  protocol = "TCP";
                }
              ];
              readinessProbe = {
                failureThreshold = 3;
                httpGet = {
                  path = "/readyz";
                  port = "https";
                  scheme = "HTTPS";
                };
                initialDelaySeconds = 20;
                periodSeconds = 10;
              };
              resources = {
                requests = {
                  cpu = "100m";
                  memory = "200Mi";
                };
              };
              securityContext = {
                allowPrivilegeEscalation = false;
                capabilities = {
                  drop = [ "ALL" ];
                };
                readOnlyRootFilesystem = true;
                runAsNonRoot = true;
                runAsUser = 1000;
                seccompProfile = {
                  type = "RuntimeDefault";
                };
              };
              volumeMounts = [
                {
                  mountPath = "/tmp";
                  name = "tmp-dir";
                }
              ];
            }
          ];
          nodeSelector = {
            "kubernetes.io/os" = "linux";
          };
          priorityClassName = "system-cluster-critical";
          serviceAccountName = "metrics-server";
          volumes = [
            {
              emptyDir = { };
              name = "tmp-dir";
            }
          ];
        };
      };
    };
  }
  {
    apiVersion = "apiregistration.k8s.io/v1";
    kind = "APIService";
    metadata = {
      labels = {
        k8s-app = "metrics-server";
      };
      name = "v1beta1.metrics.k8s.io";
    };
    spec = {
      group = "metrics.k8s.io";
      groupPriorityMinimum = 100;
      insecureSkipTLSVerify = true;
      service = {
        name = "metrics-server";
        namespace = "kube-system";
      };
      version = "v1beta1";
      versionPriority = 100;
    };
  }
]
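
Note: once the `v1beta1.metrics.k8s.io` APIService is available, resource metrics can be queried directly; both commands below are served by the metrics-server deployment above:

```bash
kubectl top nodes
kubectl top pods -A
```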
757
hosts/common/configs/system/kubernetes/options/default.nix
Normal file
757
hosts/common/configs/system/kubernetes/options/default.nix
Normal file
@@ -0,0 +1,757 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.services.kubernetes;
in
{
  options.services.kubernetes =
    with lib;
    with types;
    let
      mkCertOptions = name: {
        key = mkOption {
          description = "${name} key file.";
          type = path;
        };

        crt = mkOption {
          description = "${name} certificate file.";
          type = path;
        };
      };
    in
    {
      enable = mkEnableOption "kubernetes";

      lib = mkOption {
        description = "Kubernetes utility functions.";
        type = raw;
        readOnly = true;
        default = {
          mkKubeConfig =
            name: ca: cert: key:
            (pkgs.formats.json { }).generate "${name}-kubeconfig.json" {
              apiVersion = "v1";
              kind = "Config";
              clusters = [
                {
                  name = "local";
                  cluster = {
                    server = cfg.apiserver._address;
                    "certificate-authority" = ca;
                  };
                }
              ];
              users = [
                {
                  inherit name;
                  user = {
                    "client-certificate" = cert;
                    "client-key" = key;
                  };
                }
              ];
              contexts = [
                {
                  name = "local";
                  context = {
                    cluster = "local";
                    user = name;
                  };
                }
              ];
              current-context = "local";
            };
        };
      };

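      # Example usage (paths hypothetical):
      #   cfg.lib.mkKubeConfig "admin" /path/to/ca.crt /path/to/admin.crt /path/to/admin.key
      # yields a JSON kubeconfig in the store that points at cfg.apiserver._address.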
      roles = mkOption {
        description = "Kubernetes roles that this machine should take.";
        type = listOf (enum [
          "master"
          "node"
        ]);
        default = [
          "master"
          "node"
        ];
      };

      address = mkOption {
        description = "Kubernetes master server address.";
        type = str;
        default = "localhost";
      };

      cidr = mkOption {
        description = "Kubernetes cluster CIDR.";
        type = str;
        default = "10.0.0.0/24";
      };

      cas = {
        kubernetes = mkCertOptions "Kubernetes CA";
        frontProxy = mkCertOptions "Front Proxy CA";
        etcd = mkCertOptions "ETCD CA";
      };

      certs = {
        apiserver = {
          server = mkCertOptions "Kubernetes API Server";
          kubeletClient = mkCertOptions "Kubernetes API Server Kubelet Client";
          etcdClient = mkCertOptions "Kubernetes API Server ETCD Client";
        };

        etcd = {
          server = mkCertOptions "ETCD Server";
          peer = mkCertOptions "ETCD Peer";
        };

        frontProxy = mkCertOptions "Front Proxy Client";

        serviceAccount = {
          public = mkOption {
            description = "Service account public key file.";
            type = path;
          };

          private = mkOption {
            description = "Service account private key file.";
            type = path;
          };
        };

        accounts = {
          scheduler = mkCertOptions "Kubernetes Scheduler";
          controllerManager = mkCertOptions "Kubernetes Controller Manager";
          addonManager = mkCertOptions "Kubernetes Addon Manager";
          proxy = mkCertOptions "Kubernetes Proxy";
          admin = mkCertOptions "Kubernetes Admin";
        };
      };

      kubeconfigs = mkOption {
        description = "Kubernetes kubeconfigs.";
        type = attrsOf path;
        default = { };
      };

      apiserver = {
        _address = mkOption {
          description = "Kubernetes API server address.";
          internal = true;
          type = str;
        };

        address = mkOption {
          description = "Kubernetes API server listening address.";
          type = str;
          readOnly = true;
          default = "0.0.0.0";
        };

        port = mkOption {
          description = "Kubernetes API server listening port.";
          type = port;
          readOnly = true;
          default = 6443;
        };

        bootstrapTokenFile = mkOption {
          description = "Kubernetes API server bootstrap token file.";
          type = path;
        };
      };

      kubelet = {
        address = mkOption {
          description = "Kubernetes kubelet listening address.";
          type = str;
          readOnly = true;
          default = "0.0.0.0";
        };

        port = mkOption {
          description = "Kubernetes kubelet listening port.";
          type = port;
          readOnly = true;
          default = 10250;
        };

        taints =
          let
            taintOptions =
              { name, ... }:
              {
                key = mkOption {
                  description = "Taint key.";
                  type = str;
                  default = name;
                };

                value = mkOption {
                  description = "Taint value.";
                  type = str;
                };

                effect = mkOption {
                  description = "Taint effect.";
                  type = enum [
                    "NoSchedule"
                    "PreferNoSchedule"
                    "NoExecute"
                  ];
                };
              };
          in
          mkOption {
            description = "Taints to apply to the node.";
            type = attrsOf (submodule taintOptions);
            default = { };
          };

        bootstrapToken = mkOption {
          description = "Kubelet bootstrap token file.";
          type = path;
        };

        seedImages = mkOption {
          description = "Container images to preload on the system.";
          type = listOf package;
          default = [ ];
        };

        cidr = mkOption {
          description = "Kubernetes pod CIDR.";
          type = str;
          default = "10.1.0.0/16";
        };
      };

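      # Example (hypothetical): the attribute name becomes the taint key by
      # default, so a node reserved for GPU workloads might set:
      #
      #   services.kubernetes.kubelet.taints.dedicated = {
      #     value = "gpu";
      #     effect = "NoSchedule";
      #   };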
      scheduler = {
        address = mkOption {
          description = "Kubernetes scheduler listening address.";
          type = str;
          readOnly = true;
          default = "127.0.0.1";
        };

        port = mkOption {
          description = "Kubernetes scheduler listening port.";
          type = port;
          readOnly = true;
          default = 10251;
        };
      };

      controllerManager = {
        address = mkOption {
          description = "Kubernetes controller manager listening address.";
          type = str;
          readOnly = true;
          default = "127.0.0.1";
        };

        port = mkOption {
          description = "Kubernetes controller manager listening port.";
          type = port;
          readOnly = true;
          default = 10252;
        };
      };

      proxy = {
        address = mkOption {
          description = "Kubernetes proxy listening address.";
          type = str;
          readOnly = true;
          default = "0.0.0.0";
        };
      };

      addonManager = {
        addons = mkOption {
          description = "Kubernetes addons.";
          type = attrsOf (coercedTo attrs (a: [ a ]) (listOf attrs));
          default = { };
        };

        bootstrapAddons = mkOption {
          description = "Kubernetes addons applied with cluster-admin permissions.";
          type = attrsOf (coercedTo attrs (a: [ a ]) (listOf attrs));
          default = { };
        };
      };
    };

  config = lib.mkIf cfg.enable (
    lib.mkMerge [
      # master or node
      {
        services.kubernetes = {
          apiserver._address = "https://${cfg.address}:${toString cfg.apiserver.port}";

          kubeconfigs.admin =
            cfg.lib.mkKubeConfig "admin" cfg.cas.kubernetes.crt cfg.certs.accounts.admin.crt
              cfg.certs.accounts.admin.key;

          addonManager.bootstrapAddons = {
            addonManager = import ./addons/addon-manager { };
            bootstrap = import ./addons/bootstrap { inherit config; };
            kubeletApiAdmin = import ./addons/kubelet-api-admin { };
            metricsServer = import ./addons/metrics-server { };
          };
        };

        boot = {
          kernel.sysctl = {
            "net.bridge.bridge-nf-call-iptables" = 1;
            "net.ipv4.ip_forward" = 1;
            "net.bridge.bridge-nf-call-ip6tables" = 1;
          };

          kernelModules = [
            "br_netfilter"
            "overlay"
          ];
        };

        users = {
          users.kubernetes = {
            uid = config.ids.uids.kubernetes;
            group = "kubernetes";
            home = "/var/lib/kubernetes";
            homeMode = "755";
            createHome = true;
            description = "Kubernetes user";
          };

          groups.kubernetes.gid = config.ids.gids.kubernetes;
        };

        systemd = {
          targets.kubernetes = {
            description = "Kubernetes";
            wantedBy = [ "multi-user.target" ];
          };

          tmpfiles.rules = [
            "d /opt/cni/bin 0755 root root -"
            "d /run/kubernetes 0755 kubernetes kubernetes -"
          ];

          services = {
            kubelet =
              let
                kubeletConfig = (pkgs.formats.json { }).generate "config.json" {
                  apiVersion = "kubelet.config.k8s.io/v1beta1";
                  kind = "KubeletConfiguration";
                  address = cfg.kubelet.address;
                  port = cfg.kubelet.port;
                  authentication = {
                    x509.clientCAFile = cfg.cas.kubernetes.crt;
                    webhook = {
                      enabled = true;
                      cacheTTL = "10s";
                    };
                  };
                  authorization.mode = "Webhook";
                  cgroupDriver = "systemd";
                  hairpinMode = "hairpin-veth";
                  registerNode = true;
                  containerRuntimeEndpoint = "unix:///run/containerd/containerd.sock";
                  failSwapOn = false;
                  memorySwap.swapBehavior = "LimitedSwap";
                  rotateCertificates = true;
                  serverTLSBootstrap = true;
                  featureGates = {
                    RotateKubeletServerCertificate = true;
                    NodeSwap = true;
                  };
                  healthzBindAddress = "127.0.0.1";
                  healthzPort = 10248;
                };

                taints = lib.strings.concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (
                  lib.attrsets.mapAttrsToList (n: v: v) cfg.kubelet.taints
                );

                generateKubeletBootstrapKubeconfig = lib.meta.getExe (
                  pkgs.writeShellApplication {
                    name = "kubelet-bootstrap-kubeconfig";
                    runtimeInputs = with pkgs; [ coreutils ];
                    text = ''
                      mkdir -p /etc/kubernetes
                      cat > /etc/kubernetes/bootstrap-kubeconfig <<EOF
                      apiVersion: v1
                      kind: Config
                      clusters:
                      - cluster:
                          certificate-authority: ${cfg.cas.kubernetes.crt}
                          server: ${cfg.apiserver._address}
                        name: local
                      contexts:
                      - context:
                          cluster: local
                          user: kubelet-bootstrap
                        name: bootstrap
                      current-context: bootstrap
                      preferences: {}
                      users:
                      - name: kubelet-bootstrap
                        user:
                          token: $(<${cfg.kubelet.bootstrapToken})
                      EOF
                    '';
                  }
                );

                seedContainerImages = lib.meta.getExe (
                  pkgs.writeShellApplication {
                    name = "seed-container-images";
                    runtimeInputs = with pkgs; [
                      gzip
                      containerd
                      coreutils
                    ];
                    text = ''
                      ${lib.strings.concatMapStrings (img: ''
                        echo "Seeding container image: ${img}"
                        ${
                          if (lib.hasSuffix "gz" img) then
                            ''zcat "${img}" | ctr -n k8s.io image import -''
                          else
                            ''cat "${img}" | ctr -n k8s.io image import -''
                        }
                      '') cfg.kubelet.seedImages}
                    '';
                  }
                );
              in
              {
                description = "Kubernetes Kubelet";
                wantedBy = [ "kubernetes.target" ];
                after = [
                  "network.target"
                  "containerd.service"
                  "kube-apiserver.service"
                ];
                path = with pkgs; [
                  kubernetes
                  coreutils
                  util-linux
                  git
                  openssh
                  iproute2
                  ethtool
                  iptables
                  socat
                  thin-provisioning-tools
                ];
                preStart = ''
                  ${generateKubeletBootstrapKubeconfig}
                  ${seedContainerImages}
                '';
                script = lib.strings.concatStringsSep " " (
                  [
                    "kubelet"
                    "--config=${kubeletConfig}"
                    "--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubeconfig"
                    "--kubeconfig=/var/lib/kubelet/kubeconfig"
                    "--cert-dir=/var/lib/kubelet/pki"
                    "--hostname-override=${lib.strings.toLower config.networking.fqdnOrHostName}"
                    "--pod-infra-container-image=pause"
                    "--root-dir=/var/lib/kubelet"
                  ]
                  # optionals, not optional: the value is itself a list.
                  ++ lib.lists.optionals (taints != "") [
                    "--register-with-taints=${taints}"
                  ]
                );
                serviceConfig = {
                  Slice = "kubernetes.slice";
                  CPUAccounting = true;
                  MemoryAccounting = true;
                  Restart = "on-failure";
                  RestartSec = "1000ms";
                  WorkingDirectory = "/var/lib/kubelet";
                };
                unitConfig.StartLimitIntervalSec = 0;
              };

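            # The `taints` string above renders each attrset entry as
            # key=value:effect, comma-separated; e.g. a single taint
            # { foo = { value = "bar"; effect = "NoSchedule"; }; } becomes
            # --register-with-taints=foo=bar:NoSchedule on the kubelet.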
            kube-proxy = {
              description = "Kubernetes Proxy";
              wantedBy = [ "kubernetes.target" ];
              after = [ "kube-apiserver.service" ];
              path = with pkgs; [
                kubernetes
                iptables
                conntrack-tools
              ];
              script = lib.strings.concatStringsSep " " [
                "kube-proxy"
                "--bind-address=${cfg.proxy.address}"
                "--cluster-cidr=${cfg.kubelet.cidr}"
                "--hostname-override=${lib.strings.toLower config.networking.fqdnOrHostName}"
                "--kubeconfig=${
                  cfg.lib.mkKubeConfig "kube-proxy" cfg.cas.kubernetes.crt cfg.certs.accounts.proxy.crt
                    cfg.certs.accounts.proxy.key
                }"
              ];
              serviceConfig = {
                Slice = "kubernetes.slice";
                WorkingDirectory = "/var/lib/kubernetes";
                Restart = "on-failure";
                RestartSec = 5;
              };
              unitConfig.StartLimitIntervalSec = 0;
            };
          };
        };

        networking.firewall.enable = false;
      }

      # only master
      (lib.mkIf (lib.all (m: m == "master") cfg.roles) {
        services.kubernetes.kubelet.taints = {
          unschedulable = {
            value = "true";
            effect = "NoSchedule";
          };
          "node-role.kubernetes.io/master" = {
            value = "true";
            effect = "NoSchedule";
          };
        };
      })

      # master
      (lib.mkIf (lib.elem "master" cfg.roles) {
        services = {
          etcd = {
            enable = true;
            name = cfg.address;
            keyFile = cfg.certs.etcd.server.key;
            certFile = cfg.certs.etcd.server.crt;
            trustedCaFile = cfg.cas.etcd.crt;
            peerKeyFile = cfg.certs.etcd.peer.key;
            peerCertFile = cfg.certs.etcd.peer.crt;
            peerTrustedCaFile = cfg.cas.etcd.crt;
            clientCertAuth = true;
            peerClientCertAuth = true;
            listenClientUrls = [ "https://0.0.0.0:2379" ];
            listenPeerUrls = [ "https://0.0.0.0:2380" ];
            advertiseClientUrls = [ "https://${cfg.address}:2379" ];
            initialCluster = [ "${cfg.address}=https://${cfg.address}:2380" ];
            initialAdvertisePeerUrls = [ "https://${cfg.address}:2380" ];
          };
        };

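        # A quick health probe for the etcd member above (cert paths are
        # hypothetical; substitute the decrypted sops paths on the host):
        #
        #   etcdctl --endpoints=https://127.0.0.1:2379 \
        #     --cacert=<etcd-ca.crt> --cert=<client.crt> --key=<client.key> \
        #     endpoint health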
        systemd.services = {
          kube-apiserver = {
            description = "Kubernetes API Server";
            wantedBy = [ "kubernetes.target" ];
            after = [ "network.target" ];
            path = with pkgs; [ kubernetes ];
            script = lib.strings.concatStringsSep " " [
              "kube-apiserver"
              "--allow-privileged=true"
              "--authorization-mode=RBAC,Node"
              "--bind-address=${cfg.apiserver.address}"
              "--secure-port=${toString cfg.apiserver.port}"
              "--client-ca-file=${cfg.cas.kubernetes.crt}"
              "--tls-cert-file=${cfg.certs.apiserver.server.crt}"
              "--tls-private-key-file=${cfg.certs.apiserver.server.key}"
              "--enable-admission-plugins=${
                lib.strings.concatStringsSep "," [
                  "NamespaceLifecycle"
                  "LimitRanger"
                  "ServiceAccount"
                  "ResourceQuota"
                  "DefaultStorageClass"
                  "DefaultTolerationSeconds"
                  "NodeRestriction"
                ]
              }"
              "--etcd-servers=${
                lib.strings.concatStringsSep "," [
                  "https://${cfg.address}:2379"
                  "https://127.0.0.1:2379"
                ]
              }"
              "--etcd-cafile=${cfg.cas.etcd.crt}"
              "--etcd-certfile=${cfg.certs.apiserver.etcdClient.crt}"
              "--etcd-keyfile=${cfg.certs.apiserver.etcdClient.key}"
              "--kubelet-certificate-authority=${cfg.cas.kubernetes.crt}"
              "--kubelet-client-certificate=${cfg.certs.apiserver.kubeletClient.crt}"
              "--kubelet-client-key=${cfg.certs.apiserver.kubeletClient.key}"
              "--proxy-client-cert-file=${cfg.certs.frontProxy.crt}"
              "--proxy-client-key-file=${cfg.certs.frontProxy.key}"
              "--runtime-config=authentication.k8s.io/v1beta1=true"
              "--api-audiences=api,https://kubernetes.default.svc"
              "--service-account-issuer=https://kubernetes.default.svc"
              "--service-account-signing-key-file=${cfg.certs.serviceAccount.private}"
              "--service-account-key-file=${cfg.certs.serviceAccount.public}"
              "--service-cluster-ip-range=${cfg.cidr}"
              "--storage-backend=etcd3"
              "--enable-bootstrap-token-auth=true"
              "--token-auth-file=${cfg.apiserver.bootstrapTokenFile}"
              "--requestheader-client-ca-file=${cfg.cas.frontProxy.crt}"
              "--requestheader-allowed-names=front-proxy-client"
              "--requestheader-extra-headers-prefix=X-Remote-Extra-"
              "--requestheader-group-headers=X-Remote-Group"
              "--requestheader-username-headers=X-Remote-User"
            ];
            serviceConfig = {
              Slice = "kubernetes.slice";
              WorkingDirectory = "/var/lib/kubernetes";
              User = "kubernetes";
              Group = "kubernetes";
              AmbientCapabilities = "cap_net_bind_service";
              Restart = "on-failure";
              RestartSec = 5;
            };

            unitConfig.StartLimitIntervalSec = 0;
          };

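          # Once up, the API server should answer its health endpoint; a
          # minimal check (CA path hypothetical, assumes default anonymous
          # access to /healthz is still allowed):
          #
          #   curl --cacert <kubernetes-ca.crt> https://localhost:6443/healthz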
          kube-scheduler = {
            description = "Kubernetes Scheduler";
            wantedBy = [ "kubernetes.target" ];
            after = [ "kube-apiserver.service" ];
            path = with pkgs; [ kubernetes ];
            script = lib.strings.concatStringsSep " " [
              "kube-scheduler"
              "--bind-address=${cfg.scheduler.address}"
              "--secure-port=${toString cfg.scheduler.port}"
              "--leader-elect=true"
              "--kubeconfig=${
                cfg.lib.mkKubeConfig "kube-scheduler" cfg.cas.kubernetes.crt cfg.certs.accounts.scheduler.crt
                  cfg.certs.accounts.scheduler.key
              }"
            ];
            serviceConfig = {
              Slice = "kubernetes.slice";
              WorkingDirectory = "/var/lib/kubernetes";
              User = "kubernetes";
              Group = "kubernetes";
              Restart = "on-failure";
              RestartSec = 5;
            };
            unitConfig.StartLimitIntervalSec = 0;
          };

          kube-controller-manager = {
            description = "Kubernetes Controller Manager";
            wantedBy = [ "kubernetes.target" ];
            after = [ "kube-apiserver.service" ];
            path = with pkgs; [ kubernetes ];
            script = lib.strings.concatStringsSep " " [
              "kube-controller-manager"
              "--allocate-node-cidrs=true"
              "--bind-address=${cfg.controllerManager.address}"
              "--secure-port=${toString cfg.controllerManager.port}"
              "--cluster-cidr=${cfg.kubelet.cidr}"
              "--kubeconfig=${
                cfg.lib.mkKubeConfig "kube-controller-manager" cfg.cas.kubernetes.crt
                  cfg.certs.accounts.controllerManager.crt
                  cfg.certs.accounts.controllerManager.key
              }"
              "--leader-elect=true"
              "--root-ca-file=${cfg.cas.kubernetes.crt}"
              "--service-account-private-key-file=${cfg.certs.serviceAccount.private}"
              "--use-service-account-credentials"
              "--client-ca-file=${cfg.cas.kubernetes.crt}"
              "--cluster-signing-cert-file=${cfg.cas.kubernetes.crt}"
              "--cluster-signing-key-file=${cfg.cas.kubernetes.key}"
              "--requestheader-client-ca-file=${cfg.cas.frontProxy.crt}"
            ];
            serviceConfig = {
              Slice = "kubernetes.slice";
              Restart = "on-failure";
              RestartSec = 30;
              WorkingDirectory = "/var/lib/kubernetes";
              User = "kubernetes";
              Group = "kubernetes";
            };
            unitConfig.StartLimitIntervalSec = 0;
          };

          kube-addon-manager =
            let
              mkAddons =
                addons:
                lib.attrsets.mapAttrsToList (
                  name: addon:
                  (pkgs.formats.json { }).generate "${name}.json" {
                    apiVersion = "v1";
                    kind = "List";
                    items = addon;
                  }
                ) addons;
            in
            {
              description = "Kubernetes Addon Manager";
              wantedBy = [ "kubernetes.target" ];
              after = [ "kube-apiserver.service" ];
              environment = {
                ADDON_PATH = pkgs.runCommand "kube-addons" { } ''
                  mkdir -p $out
                  ${lib.strings.concatMapStringsSep "\n" (a: "ln -s ${a} $out/${baseNameOf a}") (
                    mkAddons cfg.addonManager.addons
                  )}
                '';
                KUBECONFIG =
                  cfg.lib.mkKubeConfig "addon-manager" cfg.cas.kubernetes.crt cfg.certs.accounts.addonManager.crt
                    cfg.certs.accounts.addonManager.key;
              };
              path = with pkgs; [
                kubernetes
                gawk
              ];
              preStart = ''
                export KUBECONFIG=${cfg.kubeconfigs.admin}
                kubectl apply -f ${lib.strings.concatStringsSep " \\\n  -f " (mkAddons cfg.addonManager.bootstrapAddons)}
              '';
              script = "kube-addons";
              serviceConfig = {
                Slice = "kubernetes.slice";
                PermissionsStartOnly = true;
                WorkingDirectory = "/var/lib/kubernetes";
                User = "kubernetes";
                Group = "kubernetes";
                Restart = "on-failure";
                RestartSec = 10;
              };
              unitConfig.StartLimitIntervalSec = 0;
            };
        };
      })

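      # Each value in `addons`/`bootstrapAddons` is coerced to a list of
      # manifests and wrapped as one JSON file, roughly:
      #   { "apiVersion": "v1", "kind": "List", "items": [ ... ] }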
      # node
      (lib.mkIf (lib.elem "node" cfg.roles) {
        virtualisation.containerd = {
          enable = true;
          settings = {
            version = 2;
            root = "/var/lib/containerd";
            state = "/run/containerd";
            oom_score = 0;
            grpc.address = "/run/containerd/containerd.sock";
            plugins."io.containerd.grpc.v1.cri" = {
              containerd.runtimes.runc = {
                runtime_type = "io.containerd.runc.v2";
                options.SystemdCgroup = true;
              };
            };
          };
        };
      })
    ]
  );
}
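# After a node boots, any seeded images should be visible in the CRI
# namespace used above, e.g.: `ctr -n k8s.io image ls`.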
293 hosts/common/configs/system/kubernetes/secrets/default.nix Normal file
@@ -0,0 +1,293 @@
{ config, ... }:
{
  sops.secrets = {
    "kubernetes/ca/kubernetes/crt" = {
      owner = "kubernetes";
      group = "users";
      mode = "0440";
    };

    "kubernetes/ca/kubernetes/key" = {
      owner = "kubernetes";
      group = "users";
      mode = "0440";
    };

    "kubernetes/ca/front-proxy/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/ca/front-proxy/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/ca/etcd/crt" = {
      owner = "etcd";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/ca/etcd/key" = {
      owner = "etcd";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/apiserver/server/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/apiserver/server/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/apiserver/etcd-client/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/apiserver/etcd-client/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/apiserver/kubelet-client/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/apiserver/kubelet-client/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/front-proxy/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/front-proxy/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/etcd/server/crt" = {
      owner = "etcd";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/etcd/server/key" = {
      owner = "etcd";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/etcd/peer/crt" = {
      owner = "etcd";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/etcd/peer/key" = {
      owner = "etcd";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/sa/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/sa/pub" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/scheduler/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/scheduler/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/controller-manager/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/controller-manager/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/addon-manager/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/addon-manager/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/proxy/crt" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/proxy/key" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/cert/accounts/admin/crt" = {
      group = "kubernetes";
    };

    "kubernetes/cert/accounts/admin/key" = {
      group = "kubernetes";
    };

    "kubernetes/token/kubelet-bootstrap/token" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };

    "kubernetes/token/kubelet-bootstrap/csv" = {
      owner = "kubernetes";
      group = "kubernetes";
      mode = "0440";
    };
  };

  services.kubernetes = {
    cas = {
      kubernetes = {
        key = config.sops.secrets."kubernetes/ca/kubernetes/key".path;
        crt = config.sops.secrets."kubernetes/ca/kubernetes/crt".path;
      };

      frontProxy = {
        key = config.sops.secrets."kubernetes/ca/front-proxy/key".path;
        crt = config.sops.secrets."kubernetes/ca/front-proxy/crt".path;
      };

      etcd = {
        key = config.sops.secrets."kubernetes/ca/etcd/key".path;
        crt = config.sops.secrets."kubernetes/ca/etcd/crt".path;
      };
    };

    certs = {
      apiserver = {
        server = {
          key = config.sops.secrets."kubernetes/cert/apiserver/server/key".path;
          crt = config.sops.secrets."kubernetes/cert/apiserver/server/crt".path;
        };

        etcdClient = {
          key = config.sops.secrets."kubernetes/cert/apiserver/etcd-client/key".path;
          crt = config.sops.secrets."kubernetes/cert/apiserver/etcd-client/crt".path;
        };

        kubeletClient = {
          key = config.sops.secrets."kubernetes/cert/apiserver/kubelet-client/key".path;
          crt = config.sops.secrets."kubernetes/cert/apiserver/kubelet-client/crt".path;
        };
      };

      etcd = {
        server = {
          key = config.sops.secrets."kubernetes/cert/etcd/server/key".path;
          crt = config.sops.secrets."kubernetes/cert/etcd/server/crt".path;
        };

        peer = {
          key = config.sops.secrets."kubernetes/cert/etcd/peer/key".path;
          crt = config.sops.secrets."kubernetes/cert/etcd/peer/crt".path;
        };
      };

      frontProxy = {
        key = config.sops.secrets."kubernetes/cert/front-proxy/key".path;
        crt = config.sops.secrets."kubernetes/cert/front-proxy/crt".path;
      };

      serviceAccount = {
        private = config.sops.secrets."kubernetes/cert/sa/key".path;
        public = config.sops.secrets."kubernetes/cert/sa/pub".path;
      };

      accounts = {
        scheduler = {
          key = config.sops.secrets."kubernetes/cert/accounts/scheduler/key".path;
          crt = config.sops.secrets."kubernetes/cert/accounts/scheduler/crt".path;
        };

        controllerManager = {
          key = config.sops.secrets."kubernetes/cert/accounts/controller-manager/key".path;
          crt = config.sops.secrets."kubernetes/cert/accounts/controller-manager/crt".path;
        };

        addonManager = {
          key = config.sops.secrets."kubernetes/cert/accounts/addon-manager/key".path;
          crt = config.sops.secrets."kubernetes/cert/accounts/addon-manager/crt".path;
        };

        proxy = {
          key = config.sops.secrets."kubernetes/cert/accounts/proxy/key".path;
          crt = config.sops.secrets."kubernetes/cert/accounts/proxy/crt".path;
        };

        admin = {
          key = config.sops.secrets."kubernetes/cert/accounts/admin/key".path;
          crt = config.sops.secrets."kubernetes/cert/accounts/admin/crt".path;
        };
      };
    };

    kubelet.bootstrapToken = config.sops.secrets."kubernetes/token/kubelet-bootstrap/token".path;

    apiserver.bootstrapTokenFile = config.sops.secrets."kubernetes/token/kubelet-bootstrap/csv".path;
  };

  systemd.services = {
    kubelet.after = [ "sops-nix.service" ];
    kube-apiserver.after = [ "sops-nix.service" ];
    kube-controller-manager.after = [ "sops-nix.service" ];
    kube-scheduler.after = [ "sops-nix.service" ];
    kube-proxy.after = [ "sops-nix.service" ];
    kube-addon-manager.after = [ "sops-nix.service" ];
    etcd.after = [ "sops-nix.service" ];
  };
}
207 hosts/common/configs/system/kubernetes/secrets/generate-secrets.sh Executable file
@@ -0,0 +1,207 @@
#!/usr/bin/env -S nix shell nixpkgs#openssl nixpkgs#yq-go nixpkgs#sops -c bash

set -o errexit
set -o pipefail

generate_ca() {
  local target_dir=$1
  local ca_name=$2
  local ca_days=$3
  local cn=$4

  mkdir -p "${target_dir}"
  local ca_key=${target_dir}/${ca_name}.key
  local ca_cert=${target_dir}/${ca_name}.crt

  openssl genrsa -out "${ca_key}" 2048
  openssl req -x509 -new -nodes -key "${ca_key}" -days "${ca_days}" -out "${ca_cert}" -subj "/CN=${cn}"
}

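# e.g. `generate_ca out/ca kubernetes 3650 kubernetes-ca` writes
# out/ca/kubernetes.key and a self-signed out/ca/kubernetes.crt.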
generate_alt_names() {
  local hosts=("$@")
  local dns=0
  local ip=0
  local alt_names=""

  for host in "${hosts[@]}"; do
    if [[ ${host} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      alt_names="${alt_names}IP.${ip} = ${host}\n"
      ip=$((ip + 1)) # avoid ((ip++)): it returns status 1 when ip is 0, tripping errexit
    else
      alt_names="${alt_names}DNS.${dns} = ${host}\n"
      dns=$((dns + 1))
    fi
  done

  echo -e "${alt_names}"
}

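# e.g. `generate_alt_names kubernetes.default 10.0.0.1` emits:
#   DNS.0 = kubernetes.default
#   IP.0 = 10.0.0.1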
generate_cnf() {
  local target_dir=$1
  local cnf_name=$2
  local cn=$3
  local hosts=("${@:4}")

  mkdir -p "${target_dir}"
  local cnf_file=${target_dir}/${cnf_name}.cnf

  cat <<EOF > "${cnf_file}"
[req]
prompt = no

[ req_ext ]
subjectAltName = @alt_names

[ alt_names ]
$(generate_alt_names "${hosts[@]}")

[ v3_ext ]
authorityKeyIdentifier=keyid,issuer:always
basicConstraints=CA:FALSE
keyUsage=keyEncipherment,dataEncipherment,digitalSignature
extendedKeyUsage=serverAuth,clientAuth
subjectAltName=@alt_names
EOF
}

generate_crt() {
  local target_dir=$1
  local cert_name=$2
  local cert_days=$3
  local cn=$4
  local o=$5
  local ca_key=$6
  local ca_cert=$7
  local hosts=("${@:8}")

  mkdir -p "${target_dir}"
  local cert_key=${target_dir}/${cert_name}.key
  local cert_csr=${target_dir}/${cert_name}.csr
  local cert_cert=${target_dir}/${cert_name}.crt

  openssl genrsa -out "${cert_key}" 2048

  local subject="/CN=${cn}"
  if [ -n "${o}" ]; then
    subject="${subject}/O=${o}"
  fi

  # "${hosts}" only ever checked the first element of the array; make that explicit.
  if [ -n "${hosts[0]:-}" ]; then
    generate_cnf "${target_dir}" "${cert_name}" "${cn}" "${hosts[@]}"
    openssl req -new -key "${cert_key}" -out "${cert_csr}" -subj "${subject}" -config "${target_dir}/${cert_name}.cnf"
    openssl x509 -req -in "${cert_csr}" -CA "${ca_cert}" -CAkey "${ca_key}" -CAcreateserial -out "${cert_cert}" -days "${cert_days}" -extfile "${target_dir}/${cert_name}.cnf" -extensions v3_ext
  else
    openssl req -new -key "${cert_key}" -out "${cert_csr}" -subj "${subject}"
    openssl x509 -req -in "${cert_csr}" -CA "${ca_cert}" -CAkey "${ca_key}" -CAcreateserial -out "${cert_cert}" -days "${cert_days}"
  fi
}

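# e.g. `generate_crt out/cert front-proxy 3650 front-proxy-client "" \
#   out/ca/front-proxy.key out/ca/front-proxy.crt ""` issues a plain client
# cert; pass hostnames/IPs after the CA cert to get a SAN section instead.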
generate_key_pair() {
  local target_dir=$1
  local key_name=$2

  mkdir -p "${target_dir}"
  local private_key=${target_dir}/${key_name}.key
  local public_key=${target_dir}/${key_name}.pub

  openssl genrsa -out "${private_key}" 2048
  openssl rsa -in "${private_key}" -pubout -out "${public_key}"
}

generate_auth_token() {
  local target_dir=$1
  local token_name=$2
  local user=$3
  local id=$4
  local groups=$5

  mkdir -p "${target_dir}"
  local token_file="${target_dir}/${token_name}.token"
  local token_auth_file="${target_dir}/${token_name}.csv"

  token="$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')"
  echo "${token}" > "${token_file}"
  echo "${token},${user},${id},\"${groups}\"" > "${token_auth_file}"
}

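# The .csv matches kube-apiserver's --token-auth-file format:
#   token,user,uid,"group1,group2"
# e.g.: 0123abcd...,kubelet-bootstrap,10001,"system:bootstrappers"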
DEFAULT_CA_DAYS=3650

if [[ -z "$SOPS_AGE_KEY_FILE" ]]; then
  echo "Please set the SOPS_AGE_KEY_FILE environment variable"
  exit 1
fi

hostname=${1:-$(hostname)}

if [ -z "${hostname}" ]; then
  echo "Usage: $0 [hostname]"
  exit 1
fi

generate_ca out/ca kubernetes ${DEFAULT_CA_DAYS} kubernetes-ca
generate_ca out/ca front-proxy ${DEFAULT_CA_DAYS} kubernetes-front-proxy-ca
generate_ca out/ca etcd ${DEFAULT_CA_DAYS} etcd-ca

generate_crt out/cert/apiserver server ${DEFAULT_CA_DAYS} kube-apiserver "" out/ca/kubernetes.key out/ca/kubernetes.crt "kubernetes" "kubernetes.default" "kubernetes.default.svc" "kubernetes.default.svc.cluster" "kubernetes.default.svc.cluster.local" "localhost" "10.0.0.1" "127.0.0.1"
generate_crt out/cert/apiserver etcd-client ${DEFAULT_CA_DAYS} kube-apiserver-etcd-client "" out/ca/etcd.key out/ca/etcd.crt ""
generate_crt out/cert/apiserver kubelet-client ${DEFAULT_CA_DAYS} kube-apiserver-kubelet-client "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
generate_crt out/cert/etcd server ${DEFAULT_CA_DAYS} kube-etcd "" out/ca/etcd.key out/ca/etcd.crt "etcd.local" "etcd.cluster.local" "localhost" "127.0.0.1"
generate_crt out/cert/etcd peer ${DEFAULT_CA_DAYS} kube-etcd-peer "" out/ca/etcd.key out/ca/etcd.crt "etcd.local" "etcd.cluster.local" "localhost" "127.0.0.1"
generate_crt out/cert front-proxy ${DEFAULT_CA_DAYS} front-proxy-client "" out/ca/front-proxy.key out/ca/front-proxy.crt ""

generate_key_pair out/cert sa

generate_crt out/cert/accounts scheduler ${DEFAULT_CA_DAYS} system:kube-scheduler "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
generate_crt out/cert/accounts controller-manager ${DEFAULT_CA_DAYS} system:kube-controller-manager "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
generate_crt out/cert/accounts addon-manager ${DEFAULT_CA_DAYS} system:kube-addon-manager "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
generate_crt out/cert/accounts proxy ${DEFAULT_CA_DAYS} system:kube-proxy "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
generate_crt out/cert/accounts admin ${DEFAULT_CA_DAYS} kubernetes-admin system:masters out/ca/kubernetes.key out/ca/kubernetes.crt ""
generate_crt out/cert/accounts users ${DEFAULT_CA_DAYS} kubernetes-users system:masters out/ca/kubernetes.key out/ca/kubernetes.crt ""

generate_auth_token out/token kubelet-bootstrap "kubelet-bootstrap" 10001 "system:bootstrappers"

# Use the hostname argument rather than re-running $(hostname), so the
# optional first argument actually takes effect.
sops_config="../../../../../${hostname}/secrets/sops.yaml"
secrets_file="../../../../../${hostname}/secrets/secrets.yaml"
decrypted_secrets_file="../../../../../${hostname}/secrets/.decrypted~secrets.yaml"
sops -d "${secrets_file}" > "${decrypted_secrets_file}"

yq -i '
  del(.kubernetes) |
  .kubernetes.ca.kubernetes.crt = load_str("out/ca/kubernetes.crt") |
  .kubernetes.ca.kubernetes.key = load_str("out/ca/kubernetes.key") |
  .kubernetes.ca.front-proxy.crt = load_str("out/ca/front-proxy.crt") |
  .kubernetes.ca.front-proxy.key = load_str("out/ca/front-proxy.key") |
  .kubernetes.ca.etcd.crt = load_str("out/ca/etcd.crt") |
  .kubernetes.ca.etcd.key = load_str("out/ca/etcd.key") |
  .kubernetes.cert.apiserver.server.crt = load_str("out/cert/apiserver/server.crt") |
  .kubernetes.cert.apiserver.server.key = load_str("out/cert/apiserver/server.key") |
  .kubernetes.cert.apiserver.etcd-client.crt = load_str("out/cert/apiserver/etcd-client.crt") |
  .kubernetes.cert.apiserver.etcd-client.key = load_str("out/cert/apiserver/etcd-client.key") |
  .kubernetes.cert.apiserver.kubelet-client.crt = load_str("out/cert/apiserver/kubelet-client.crt") |
  .kubernetes.cert.apiserver.kubelet-client.key = load_str("out/cert/apiserver/kubelet-client.key") |
  .kubernetes.cert.etcd.server.crt = load_str("out/cert/etcd/server.crt") |
  .kubernetes.cert.etcd.server.key = load_str("out/cert/etcd/server.key") |
  .kubernetes.cert.etcd.peer.crt = load_str("out/cert/etcd/peer.crt") |
  .kubernetes.cert.etcd.peer.key = load_str("out/cert/etcd/peer.key") |
  .kubernetes.cert.front-proxy.crt = load_str("out/cert/front-proxy.crt") |
  .kubernetes.cert.front-proxy.key = load_str("out/cert/front-proxy.key") |
  .kubernetes.cert.sa.key = load_str("out/cert/sa.key") |
  .kubernetes.cert.sa.pub = load_str("out/cert/sa.pub") |
  .kubernetes.cert.accounts.scheduler.crt = load_str("out/cert/accounts/scheduler.crt") |
  .kubernetes.cert.accounts.scheduler.key = load_str("out/cert/accounts/scheduler.key") |
  .kubernetes.cert.accounts.controller-manager.crt = load_str("out/cert/accounts/controller-manager.crt") |
  .kubernetes.cert.accounts.controller-manager.key = load_str("out/cert/accounts/controller-manager.key") |
  .kubernetes.cert.accounts.addon-manager.crt = load_str("out/cert/accounts/addon-manager.crt") |
  .kubernetes.cert.accounts.addon-manager.key = load_str("out/cert/accounts/addon-manager.key") |
  .kubernetes.cert.accounts.proxy.crt = load_str("out/cert/accounts/proxy.crt") |
  .kubernetes.cert.accounts.proxy.key = load_str("out/cert/accounts/proxy.key") |
  .kubernetes.cert.accounts.admin.crt = load_str("out/cert/accounts/admin.crt") |
  .kubernetes.cert.accounts.admin.key = load_str("out/cert/accounts/admin.key") |
  .kubernetes.cert.accounts.users.crt = load_str("out/cert/accounts/users.crt") |
  .kubernetes.cert.accounts.users.key = load_str("out/cert/accounts/users.key") |
  .kubernetes.token.kubelet-bootstrap.token = load_str("out/token/kubelet-bootstrap.token") |
  .kubernetes.token.kubelet-bootstrap.csv = load_str("out/token/kubelet-bootstrap.csv")
' "${decrypted_secrets_file}"

sops --config "${sops_config}" -e "${decrypted_secrets_file}" > "${secrets_file}"
rm -rf "${decrypted_secrets_file}" out
@@ -1,22 +0,0 @@
{
  inputs,
  lib,
  pkgs,
  ...
}:
{
  imports = [ inputs.lanzaboote.nixosModules.lanzaboote ];

  environment = {
    persistence."/persist/state"."/var/lib/sbctl" = { };

    systemPackages = with pkgs; [ sbctl ];
  };

  boot.loader.systemd-boot.enable = lib.mkForce false;

  boot.lanzaboote = {
    enable = true;
    pkiBundle = "/var/lib/sbctl";
  };
}
@@ -1,9 +1,4 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{ config, pkgs, ... }:
{
  virtualisation = {
    libvirtd = {
@@ -17,20 +12,16 @@
    spiceUSBRedirection.enable = true;
  };

  systemd.services = {
    libvirtd.after = [ "NetworkManager.service" ];

    libvirtd-network-default = {
      description = "Start Default Virtual Network for Libvirt";
      script = "${config.virtualisation.libvirtd.package}/bin/virsh net-start default";
      preStop = "${config.virtualisation.libvirtd.package}/bin/virsh net-destroy default";
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      wantedBy = [ "libvirtd.service" ];
      after = [ "libvirtd.service" ];
  systemd.services.libvirtd-network-default = {
    description = "Start Default Virtual Network for Libvirt";
    script = "${config.virtualisation.libvirtd.package}/bin/virsh net-start default";
    preStop = "${config.virtualisation.libvirtd.package}/bin/virsh net-destroy default";
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
    };
    wantedBy = [ "libvirtd.service" ];
    after = [ "libvirtd.service" ];
  };

  environment = {
@@ -41,7 +32,7 @@
      "ovmf/edk2-i386-vars.fd".source =
        "${config.virtualisation.libvirtd.qemu.package}/share/qemu/edk2-i386-vars.fd";
    };
    persistence."/persist/state"."/var/lib/libvirt" = { };
    persistence."/persist"."/var/lib/libvirt" = { };
  };

  programs.virt-manager.enable = true;
10 hosts/common/configs/system/networking/default.nix Normal file
@@ -0,0 +1,10 @@
{ config, ... }:
{
  networking.networkmanager.enable = true;

  environment.persistence."/persist"."/etc/NetworkManager/system-connections" = { };

  systemd.services.NetworkManager.after = [
    config.environment.persistence."/persist"."/etc/NetworkManager/system-connections".mount
  ];
}
@@ -1,10 +0,0 @@
{ config, ... }:
{
  networking.networkmanager.enable = true;

  environment.persistence."/persist/state"."/etc/NetworkManager/system-connections" = { };

  systemd.services.NetworkManager.after = [
    config.environment.persistence."/persist/state"."/etc/NetworkManager/system-connections".mount
  ];
}
@@ -1,5 +1,3 @@
# shellcheck shell=bash

if [[ "${EUID}" -ne 0 ]]; then
  echo "Please run the script as root."
  exit 1
@@ -18,8 +16,13 @@ if [[ -e /mnt/btrfs && -n $(mountpoint -q /mnt/btrfs) ]]; then
  exit 1
fi

if [[ -z "$DEVICE" ]]; then
  echo "Error: DEVICE variable is not set."
  exit 1
fi

mkdir -p /mnt/btrfs
mount "$DEVICE" /mnt/btrfs
mount "/dev/mapper/$DEVICE" /mnt/btrfs

if [[ -e /mnt/btrfs/@.bak ]]; then
  if [[ -n "$(ls -A /mnt/btrfs/@.bak)" ]]; then
@@ -8,7 +8,6 @@
      iputils
      jq
      nix
      sops
      inputs.disko.packages.${system}.disko
    ];
    text = builtins.readFile ./install.sh;
@@ -16,6 +15,8 @@
  ];

  home-manager.sharedModules = [
    { programs.zsh.initContent = builtins.readFile ./install.completion.zsh; }
    {
      programs.zsh.initExtra = builtins.readFile ./install.completion.zsh;
    }
  ];
}
@@ -4,7 +4,7 @@ _nix-install_completion() {
    '-m[Mode: 'install' or 'repair']:mode:(install repair)'
    '-h[Host to configure]:host:($(_list_hosts))'
    '-k[Key file to copy to user config]:key:($(_list_keys))'
    '-s[Enroll secure boot keys on current device]'
    '-p[LUKS password file to use for encryption]:password_file:_files'
    '-c[Copy configuration to target]'
    '-r[Reboot after completion]'
  )
@@ -1,14 +1,12 @@
# shellcheck shell=bash

usage() {
  echo "Usage: $0 flake -m install|repair -h host [-k key] [-p password_file] [-s] [-c] [-r]"
  echo "Usage: $0 flake -m install|repair -h host [-k key] [-p password_file] [-c] [-r]"
  echo
  echo "Options:"
  echo "  flake            Directory containing the flake.nix file."
  echo "  -m mode          Mode: 'install' or 'repair'."
  echo "  -h host          Host to configure."
  echo "  -k key           Key file to copy to user config."
  echo "  -s               Enroll secure boot keys on current device."
  echo "  -p password_file LUKS password file to use for encryption."
  echo "  -c               Copy configuration to target."
  echo "  -r               Reboot after completion."
  exit 1
@@ -36,7 +34,7 @@ check_flake() {
}

check_host() {
  if ! nix flake show --allow-import-from-derivation --quiet --json "$flake" 2>/dev/null | jq -e ".nixosConfigurations[\"$host\"]" &>/dev/null; then
  if ! nix flake show --quiet --json "$flake" 2>/dev/null | jq -e ".nixosConfigurations[\"$host\"]" &>/dev/null; then
    echo "Host '$host' not found in flake."
    exit 1
  fi
@@ -50,23 +48,34 @@ check_key() {
}

set_password_file() {
  SOPS_AGE_KEY_FILE="$flake/secrets/$key/key.txt"
  export SOPS_AGE_KEY_FILE
  install -m 600 /dev/null /tmp/keyfile
  sops --decrypt --extract "['luks']" "$flake/secrets/hosts/$host/secrets.yaml" > /tmp/keyfile
  unset SOPS_AGE_KEY_FILE
  if [[ -n "$password_file" ]]; then
    if [[ ! -f "$password_file" ]]; then
      echo "LUKS key file '$password_file' not found."
      exit 1
    fi

    ln -sf "$(realpath "$password_file")" /tmp/installer.key
  else
    echo "Enter password for LUKS encryption:"
    IFS= read -r -s password
    echo "Enter password again to confirm: "
    IFS= read -r -s password_check
    # Return non-zero on mismatch so the `until set_password_file` loop in
    # main() re-prompts instead of silently writing a bad key.
    if [ "$password" != "$password_check" ]; then
      unset password password_check
      return 1
    fi
    echo -n "$password" > /tmp/installer.key
    unset password password_check
  fi
}

prepare_disk() {
  local disko_mode="$1"
  mkdir -p /mnt
  root=$(mktemp -d /mnt/install.XXXXXX)
  disko -m "$disko_mode" --yes-wipe-all-disks --root-mountpoint "$root" "$flake/hosts/$host/format.nix"
  disko -m "$disko_mode" --yes-wipe-all-disks --root-mountpoint "$root" "$flake/hosts/$host/format.nix" --arg device "\"$device\""
}

copy_sops_keys() {
  mkdir -p "$root/persist/state/etc/ssh"
  cp -f "$flake/secrets/hosts/$host/ssh_host_ed25519_key" "$root/persist/state/etc/ssh/ssh_host_ed25519_key"
copy_keys() {
  mkdir -p "$root/persist/etc/ssh"
  cp "$flake/hosts/$host/secrets/ssh_host_ed25519_key" "$root/persist/etc/ssh/ssh_host_ed25519_key"

  for path in "$flake/hosts/$host/users"/*; do
    if [[ -z "$key" ]]; then
@@ -75,119 +84,90 @@ copy_sops_keys() {

    local user
    user=$(basename "$path")

    mkdir -p "$root/persist/state/home/$user/.config/sops-nix"
    cp -f "$flake/secrets/$key/key.txt" "$root/persist/state/home/$user/.config/sops-nix/key.txt"

    owner=$(cat "$flake/hosts/$host/users/$user/uid")
    group=100
    chown "$owner:$group" \
      "$root/persist/state/home/$user" \
      "$root/persist/state/home/$user/.config" \
      "$root/persist/state/home/$user/.config/sops-nix" \
      "$root/persist/state/home/$user/.config/sops-nix/key.txt"
    mkdir -p "$root/persist/home/$user/.config/sops-nix"
    cp "$flake/secrets/$key/key.txt" "$root/persist/home/$user/.config/sops-nix/key.txt"
    chown -R "$(cat "$flake/hosts/$host/users/$user/uid"):100" "$root/persist/home/$user"
  done
}

copy_secure_boot_keys() {
  mkdir -p "$root/persist/state/var/lib/sbctl/keys"/{db,KEK,PK}

  SOPS_AGE_KEY_FILE="$flake/secrets/$key/key.txt"
  export SOPS_AGE_KEY_FILE

  sops --decrypt --extract "['guid']" "$flake/secrets/domains/lanzaboote/secrets.yaml" > "$root/persist/state/var/lib/sbctl/GUID"
  sops --decrypt --extract "['keys']['kek']['key']" "$flake/secrets/domains/lanzaboote/secrets.yaml" > "$root/persist/state/var/lib/sbctl/keys/KEK/KEK.key"
  sops --decrypt --extract "['keys']['kek']['pem']" "$flake/secrets/domains/lanzaboote/secrets.yaml" > "$root/persist/state/var/lib/sbctl/keys/KEK/KEK.pem"
  sops --decrypt --extract "['keys']['pk']['key']" "$flake/secrets/domains/lanzaboote/secrets.yaml" > "$root/persist/state/var/lib/sbctl/keys/PK/PK.key"
  sops --decrypt --extract "['keys']['pk']['pem']" "$flake/secrets/domains/lanzaboote/secrets.yaml" > "$root/persist/state/var/lib/sbctl/keys/PK/PK.pem"
  sops --decrypt --extract "['keys']['db']['key']" "$flake/secrets/domains/lanzaboote/secrets.yaml" > "$root/persist/state/var/lib/sbctl/keys/db/db.key"
  sops --decrypt --extract "['keys']['db']['pem']" "$flake/secrets/domains/lanzaboote/secrets.yaml" > "$root/persist/state/var/lib/sbctl/keys/db/db.pem"

  chmod 400 "$root/persist/state/var/lib/sbctl/keys"/*/*

  unset SOPS_AGE_KEY_FILE

  mkdir -p "$root/var/lib/sbctl"
  mount --bind -o X-fstrim.notrim,x-gvfs-hide "$root/persist/state/var/lib/sbctl" "$root/var/lib/sbctl"
}

install_nixos() {
install() {
  nixos-install --root "$root" --flake "$flake#$host" --no-root-passwd
}

enroll_secure_boot() {
  sbctl enroll-keys --microsoft
}

copy_config() {
  echo "Copying configuration..."
  mkdir -p "$root/persist/user/etc"
  rm -rf "$root/persist/user/etc/nixos"
  cp -r "$flake" "$root/persist/user/etc/nixos"
  rm -rf "$root/persist/etc/nixos"
  cp -r "$flake" "$root/persist/etc/nixos"
}

finish() {
  echo "Rebooting system..."
  trap - EXIT
  cleanup
  reboot
}

cleanup() {
  rm -f /tmp/keyfile
  if [[ -d "$root" ]]; then umount "$root/var/lib/sbctl"; fi
  if [[ -n "$host" ]]; then disko -m "unmount" "$flake/hosts/$host/format.nix"; fi
  rm -f /tmp/installer.key
  if [[ -n "$host" && -n "$device" ]]; then disko -m "unmount" "$flake/hosts/$host/format.nix" --arg device "\"$device\""; fi
  if [[ -d "$root" ]]; then rmdir "$root"; fi
}

main() {
  check_root
  check_network
  check_root
  check_network

  if [[ "$#" -lt 1 ]]; then usage; fi
  if [[ "$#" -lt 1 ]]; then
    usage
  fi

  flake="$(realpath "$1")"
  check_flake
  shift
  flake="$(realpath "$1")"
  check_flake
  shift

  mode=""
  host=""
  key=""
  enroll_secure_boot_flag="false"
  copy_config_flag="false"
  reboot_flag="false"
  mode=""
  host=""
  key=""
  password_file=""
  copy_config_flag="false"
  reboot_flag="false"

  while getopts "m:h:k:scr" opt; do
    case "$opt" in
      m) mode="$OPTARG" ;;
      h) host="$OPTARG" ;;
      k) key="$OPTARG" ;;
      s) enroll_secure_boot_flag="true" ;;
      c) copy_config_flag="true" ;;
      r) reboot_flag="true" ;;
      *) usage ;;
    esac
  done

  if [[ -z "$mode" || -z "$host" ]]; then usage; fi

  check_host
  check_key
  set_password_file

  case "$mode" in
    install) prepare_disk "destroy,format,mount";;
    repair) prepare_disk "mount";;
    *)
      echo "Invalid mode: $mode"
      usage
      ;;
  while getopts "m:h:k:p:cr" opt; do
    case "$opt" in
      m) mode="$OPTARG" ;;
      h) host="$OPTARG" ;;
      k) key="$OPTARG" ;;
      p) password_file="$OPTARG" ;;
      c) copy_config_flag="true" ;;
      r) reboot_flag="true" ;;
      *) usage ;;
    esac
  done

  copy_sops_keys
  copy_secure_boot_keys
  if [[ -z "$mode" || -z "$host" ]]; then
    usage
  fi

  install_nixos
  check_host
  check_key
  until set_password_file; do echo "Passwords did not match, please try again."; done

  [[ "$enroll_secure_boot_flag" == "true" ]] && enroll_secure_boot
  [[ "$copy_config_flag" == "true" ]] && copy_config
  device=$(grep -oP '(?<=device = ")[^"]+' "$flake/hosts/$host/default.nix")

  cleanup

  [[ "$reboot_flag" == "true" ]] && reboot
}

main "$@"
  case "$mode" in
    install)
      prepare_disk "destroy,format,mount"
      copy_keys
      install
      if [[ "$copy_config_flag" == "true" ]]; then copy_config; fi
      if [[ "$reboot_flag" == "true" ]]; then finish; fi
      ;;
    repair)
      prepare_disk "mount"
      install
      if [[ "$reboot_flag" == "true" ]]; then finish; fi
      ;;
    *)
      echo "Invalid mode: $mode"
      usage
      ;;
  esac

@@ -1,4 +1,7 @@
{ ... }:
{
  programs.nix-ld.enable = true;
  programs.nix-ld = {
    enable = true;
    libraries = [ ];
  };
}

@@ -1,4 +0,0 @@
{ pkgs, ... }:
{
  environment.systemPackages = with pkgs; [ nix-update ];
}
@@ -1,19 +1,8 @@
{ config, inputs, ... }:
{
  sops = {
    secrets = {
      "git/credentials/github.com/public/username".sopsFile =
        "${inputs.secrets}/domains/personal/secrets.yaml";
      "git/credentials/github.com/public/password".sopsFile =
        "${inputs.secrets}/domains/personal/secrets.yaml";
    };

    templates.nix-access-tokens = {
      content = ''
        access-tokens = github.com=${config.sops.placeholder."git/credentials/github.com/public/password"}
      '';
      group = "users";
    };
  sops.secrets."nix/accessTokens/github" = {
    sopsFile = ../../../../../secrets/personal/secrets.yaml;
    group = "users";
  };

  nix = {
@@ -23,18 +12,14 @@
      "nix-command"
      "flakes"
    ];
    download-buffer-size = 524288000;
  };

  channel.enable = false;

  registry.self.flake = inputs.self;
  gc.automatic = true;
  optimise.automatic = true;

  registry.self.flake = inputs.self;

  extraOptions = ''
    !include ${config.sops.templates.nix-access-tokens.path}
    !include ${config.sops.secrets."nix/accessTokens/github".path}
  '';
  };
}

@@ -1,9 +1,6 @@
{ inputs, system, ... }:
{ inputs, ... }:
{
  imports = [ inputs.nur.modules.nixos.default ];

  nixpkgs = {
    hostPlatform = system;
    config.allowUnfree = true;
  };
  nixpkgs.config.allowUnfree = true;
}
@@ -8,16 +8,12 @@
    };
    pulse.enable = true;
    jack.enable = true;
    extraConfig.pipewire-pulse.pipewire-pulse = {
      "pulse.cmd" = [
    extraConfig.pipewire-pulse = {
      pulse.cmd = [
        {
          cmd = "load-module";
          args = "module-switch-on-connect";
        }
        {
          cmd = "load-module";
          args = "module-combine-sink";
        }
      ];
    };
  };
@@ -1,27 +0,0 @@
{ pkgs, inputs, ... }:
{
  imports = [ inputs.quadlet-nix.nixosModules.quadlet ];

  virtualisation = {
    podman.enable = true;

    containers = {
      enable = true;
      storage.settings.storage.driver = "btrfs";
    };

    quadlet = {
      enable = true;
      autoEscape = true;
    };
  };

  environment = {
    persistence."/persist/state"."/var/lib/containers".create = "directory";

    systemPackages = with pkgs; [
      podman-compose
      kompose
    ];
  };
}

@@ -1,4 +0,0 @@
{ ... }:
{
  powerManagement.enable = true;
}
5 hosts/common/configs/system/powertop/default.nix Normal file
@@ -0,0 +1,5 @@
{ pkgs, ... }:
{
  environment.systemPackages = with pkgs; [ powertop ];
  powerManagement.powertop.enable = true;
}
@@ -18,19 +18,19 @@
    };
  };

  environment.persistence."/persist/state" = {
  environment.persistence."/persist" = {
    "/var/lib/cups/ppd" = { };
    "/var/lib/cups/printers.conf" = { };
  };

  systemd = {
    services.cups.after = [
      config.environment.persistence."/persist/state"."/var/lib/cups/ppd".mount
      config.environment.persistence."/persist/state"."/var/lib/cups/printers.conf".mount
      config.environment.persistence."/persist"."/var/lib/cups/ppd".mount
      config.environment.persistence."/persist"."/var/lib/cups/printers.conf".mount
    ];
    sockets.cups.after = [
      config.environment.persistence."/persist/state"."/var/lib/cups/ppd".mount
      config.environment.persistence."/persist/state"."/var/lib/cups/printers.conf".mount
      config.environment.persistence."/persist"."/var/lib/cups/ppd".mount
      config.environment.persistence."/persist"."/var/lib/cups/printers.conf".mount
    ];
  };
}
@@ -1,12 +0,0 @@
{ pkgs, ... }:
{
  environment.systemPackages = with pkgs; [
    smartmontools
    nvme-cli
  ];

  services.smartd = {
    enable = true;
    # Short self-test daily at 02:00, long self-test every Sunday at 04:00.
    defaults.autodetected = "-a -o on -n idle,10 -s (S/../.././02|L/../../7/04)";
  };
}
@@ -8,27 +8,13 @@
  imports = [ inputs.sops-nix.nixosModules.sops ];

  environment = {
    persistence."/persist/state"."/etc/ssh/ssh_host_ed25519_key" = { };

    systemPackages = with pkgs; [
      sops
      age
      ssh-to-age
    ];
    persistence."/persist"."/etc/ssh/ssh_host_ed25519_key" = { };
    systemPackages = with pkgs; [ sops ];
  };

  sops = {
    defaultSopsFile = "${inputs.secrets}/hosts/${config.networking.hostName}/secrets.yaml";

    age = {
      generateKey = true;
      keyFile = "/var/lib/sops-nix/key.txt";

      sshKeyPaths =
        if config.environment.impermanence.enable then
          [ config.environment.persistence."/persist/state"."/etc/ssh/ssh_host_ed25519_key".source ]
        else
          [ "/etc/ssh/ssh_host_ed25519_key" ];
    };
  sops.age = {
    generateKey = true;
    sshKeyPaths = [ config.environment.persistence."/persist"."/etc/ssh/ssh_host_ed25519_key".source ];
    keyFile = "/var/lib/sops-nix/key.txt";
  };
}
@@ -1,23 +1,12 @@
{ inputs, ... }:
{ ... }:
{
  programs.ssh.knownHosts = {
    installer.publicKeyFile = "${inputs.secrets}/hosts/installer/ssh_host_ed25519_key.pub";
    elara.publicKeyFile = "${inputs.secrets}/hosts/elara/ssh_host_ed25519_key.pub";
    himalia.publicKeyFile = "${inputs.secrets}/hosts/himalia/ssh_host_ed25519_key.pub";
  programs.ssh = {
    startAgent = true;

    jupiter = {
      publicKeyFile = "${inputs.secrets}/hosts/jupiter/ssh_host_ed25519_key.pub";
      extraHostNames = [ "karaolidis.com" ];
    };

    jupiter-sish = {
      publicKeyFile = "${inputs.secrets}/hosts/jupiter/ssh_sish_ed25519_key.pub";
      extraHostNames = [ "karaolidis.com" ];
    };

    jupiter-vps = {
      publicKeyFile = "${inputs.secrets}/hosts/jupiter-vps/ssh_host_ed25519_key.pub";
      extraHostNames = [ "vps.karaolidis.com" ];
    knownHosts = {
      installer.publicKeyFile = ../../../../installer/secrets/ssh_host_ed25519_key.pub;
      eirene.publicKeyFile = ../../../../eirene/secrets/ssh_host_ed25519_key.pub;
      elara.publicKeyFile = ../../../../elara/secrets/ssh_host_ed25519_key.pub;
    };
  };
}
@@ -1,15 +0,0 @@
{ pkgs, ... }:
{
  environment.systemPackages = with pkgs; [
    kitty.terminfo
    tmux.terminfo
  ];

  services.openssh = {
    enable = true;
    settings = {
      PasswordAuthentication = false;
      PrintMotd = false;
    };
  };
}
@@ -1,4 +0,0 @@
{ ... }:
{
  security.pam.services.sudo.nodelay = true;
}
@@ -1,4 +1,17 @@
{ ... }:
{ inputs, ... }:
{
  system.stateVersion = "24.11";
  system = {
    autoUpgrade = {
      enable = true;
      flake = inputs.self.outPath;
      flags = [
        "--update-input"
        "nixpkgs"
        "-L"
      ];
      dates = "02:00";
    };

    stateVersion = "24.11";
  };
}
@@ -1,5 +1,3 @@
# shellcheck shell=bash

case "$2" in
connectivity-change)
  if timezone=$(curl --fail https://ipapi.co/timezone); then
12 hosts/common/configs/system/tlp/default.nix Normal file
@@ -0,0 +1,12 @@
{ ... }:
{
  services.tlp = {
    enable = true;
    settings = {
      CPU_SCALING_GOVERNOR_ON_AC = "performance";
      CPU_SCALING_GOVERNOR_ON_BAT = "powersave";
      CPU_ENERGY_PERF_POLICY_ON_AC = "performance";
      CPU_ENERGY_PERF_POLICY_ON_BAT = "power";
    };
  };
}
4 hosts/common/configs/system/tree/default.nix Normal file
@@ -0,0 +1,4 @@
{ pkgs, ... }:
{
  environment.systemPackages = with pkgs; [ tree ];
}
@@ -1,8 +0,0 @@
{ ... }:
{
  services.upower = {
    enable = true;
    allowRiskyCriticalPowerAction = true;
    criticalPowerAction = "Ignore";
  };
}
4 hosts/common/configs/system/wget/default.nix Normal file
@@ -0,0 +1,4 @@
{ pkgs, ... }:
{
  environment.systemPackages = with pkgs; [ wget ];
}
@@ -6,9 +6,7 @@
  };

  environment = {
    persistence."/persist/state"."/var/lib/zsh" = { };
    persistence."/persist"."/var/lib/zsh" = { };
    pathsToLink = [ "/share/zsh" ];
  };

  systemd.tmpfiles.rules = [ "d /var/lib/zsh 0755 root root" ];
}
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ config, pkgs, ... }:
{
  nixpkgs.overlays = [
@@ -14,7 +17,7 @@

  users.users.${user}.extraGroups = [ "adbusers" ];

  environment.persistence."/persist/state" = {
  environment.persistence."/persist" = {
    "${home}/.local/share/android/adbkey" = { };
    "${home}/.local/share/android/adbkey.pub" = { };
  };
8 hosts/common/configs/user/console/bashmount/default.nix Normal file
@@ -0,0 +1,8 @@
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ lib, pkgs, ... }:
{
  home-manager.users.${user}.programs.bashmount.enable = true;
}
@@ -1,8 +1,13 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ lib, pkgs, ... }:
{
  users.users.${user}.extraGroups = [
    "video"
    "input"
  ];

  home-manager.users.${user}.home.packages = with pkgs; [ brightnessctl ];
}
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ ... }:
{
  home-manager.users.${user}.programs.btop = {
@@ -11,7 +14,7 @@
      update_ms = 1000;
      proc_tree = true;
      cpu_single_graph = true;
      disks_filter = "/ /nix /persist";
      disks_filter = "/ /nix /persist /cache";
    };
  };
}
@@ -1,19 +0,0 @@
{ user, home }:
{
  config,
  lib,
  pkgs,
  ...
}:
let
  hmConfig = config.home-manager.users.${user};
in
{
  home-manager.users.${user} = {
    home.packages = with pkgs; [ dive ];

    xdg.configFile."dive/config.yaml" = lib.mkIf (
      config.virtualisation.podman.enable || hmConfig.services.podman.enable
    ) { source = (pkgs.formats.yaml { }).generate "config.yaml" { container-engine = "podman"; }; };
  };
}
55 hosts/common/configs/user/console/docker/default.nix Normal file
@@ -0,0 +1,55 @@
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
  rootless ? true,
}:
{
  config,
  lib,
  pkgs,
  ...
}:
lib.mkMerge [
  {
    virtualisation.docker.rootless = {
      enable = rootless;
      setSocketVariable = true;
      enableOnBoot = false;
      storageDriver = "btrfs";

      daemon.settings = {
        experimental = true;
        ipv6 = true;
        fixed-cidr-v6 = "fd00::/80";
      };

      autoPrune = {
        enable = true;
        flags = [ "--all" ];
      };
    };

    home-manager.users.${user}.home = {
      packages = with pkgs; [ docker-compose ];

      sessionVariables = {
        DOCKER_CONFIG = "${home}/.config/docker";
      };
    };
  }
  (lib.mkIf rootless {
    environment.persistence."/persist"."${home}/.local/share/docker" = { };

    systemd.user = {
      services.docker.after = [
        config.environment.persistence."/persist"."${home}/.local/share/docker".mount
      ];
      sockets.docker.after = [
        config.environment.persistence."/persist"."${home}/.local/share/docker".mount
      ];
    };
  })
  (lib.mkIf (!rootless) {
    users.users.${user}.extraGroups = [ "docker" ];
  })
]
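Each of these per-user files is a curried module: the first attribute set takes repository-level settings, the second is the usual NixOS module signature. A hypothetical host-side import of the new docker module (the user, home, and relative path are made up for illustration):

{
  imports = [
    (import ./docker {
      user = "alice";
      home = "/home/alice";
      rootless = true; # the default; false joins the user to the system docker group instead
    })
  ];
}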
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ ... }:
{
  home-manager.users.${user}.programs.fastfetch.enable = true;
@@ -1,8 +1,8 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ pkgs, ... }:
{
  home-manager.users.${user}.home.packages = with pkgs; [
    ffmpeg
    mediainfo
  ];
  home-manager.users.${user}.home.packages = with pkgs; [ ffmpeg ];
}
@@ -1,5 +1,3 @@
# shellcheck shell=bash

git interpret-trailers --if-exists doNothing --trailer \
  "Signed-off-by: $(git config user.name) <$(git config user.email)>" \
  --in-place "$1"
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{
  config,
  lib,
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{
  config,
  lib,
@@ -1,5 +1,3 @@
# shellcheck shell=bash

install -d -m 700 "$GNUPGHOME"

KEYS="$HOME/.config/sops-nix/secrets/gpg"
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ config, inputs, ... }:
{
  imports = [ inputs.home-manager.nixosModules.default ];
@@ -6,7 +9,9 @@
  programs.dconf.enable = true;

  home-manager = {
    extraSpecialArgs = { inherit inputs; };
    extraSpecialArgs = {
      inherit inputs;
    };
    backupFileExtension = "bak";
    useUserPackages = true;
    useGlobalPkgs = true;
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ pkgs, ... }:
{
  home-manager.users.${user}.home.packages = with pkgs; [ imagemagick ];
@@ -1,12 +0,0 @@
{ user, home }:
{ pkgs, ... }:
{
  home-manager.users.${user}.home.packages = with pkgs; [
    iproute2
    iptables
    ipset
    ethtool
    tcpdump
    ipcalc
  ];
}
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ ... }:
{
  home-manager.users.${user}.programs.jq.enable = true;
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{
  config,
  lib,
@@ -6,19 +9,51 @@
  ...
}:
{
  nixpkgs.overlays = [
    (final: prev: {
      k9s = prev.k9s.overrideAttrs (oldAttrs: {
        patches = oldAttrs.patches or [ ] ++ [ ./remove-splash.patch ];
      });
    })
  ];

  environment.persistence = {
    "/persist/user"."${home}/.kube" = { };
    "/persist/cache"."${home}/.kube/cache" = { };
    "/persist"."${home}/.kube" = { };
    "/cache"."${home}/.kube/cache" = { };
  };

  users.users.${user}.extraGroups = [ "kubernetes" ];

  sops.secrets = {
    "kubernetes/cert/accounts/${user}/crt" = {
      key = "kubernetes/cert/accounts/users/crt";
      group = "users";
      mode = "0440";
    };

    "kubernetes/cert/accounts/${user}/key" = {
      key = "kubernetes/cert/accounts/users/key";
      group = "users";
      mode = "0440";
    };
  };

  services.kubernetes.kubeconfigs.${user} =
    config.services.kubernetes.lib.mkKubeConfig user config.sops.secrets."kubernetes/ca/kubernetes/crt".path
      config.sops.secrets."kubernetes/cert/accounts/${user}/crt".path
      config.sops.secrets."kubernetes/cert/accounts/${user}/key".path;

  home-manager.users.${user} = {
    home.packages = with pkgs; [
      kubectl
      kustomize
      kubernetes-helm
      kompose
      kind
    ];
    home = {
      packages = with pkgs; [
        kubectl
        kustomize
        kubernetes-helm
        kompose
      ];

      file.".kube/local".source = config.services.kubernetes.kubeconfigs.${user};
    };

    programs = {
      k9s = {
@@ -32,20 +67,19 @@
          ui = {
            skin = "matugen";
            logoless = true;
            splashless = true;
            reactive = true;
          };
        };
      };

      zsh = {
        initContent = ''
        initExtra = ''
          kubeswitch() {
            local target="$HOME/.kube/$1"
            local config="$HOME/.kube/config"

            if [[ -f "$target" && "$target" != "$config" ]]; then
              ln -srf "$target" "$config"
              ln -sf "$target" "$config"
              echo "Switched kube context to $1"
              p10k reload
            else
@@ -67,6 +101,6 @@
    };
  };

  theme.template.".config/k9s/skins/matugen.yaml".source = ./theme.yaml;
  theme.template."${home}/.config/k9s/skins/matugen.yaml".source = ./theme.yaml;
  };
}
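For illustration, the `kubeswitch` helper above simply re-points `~/.kube/config` at one of the named kubeconfig files; a hypothetical session (`local` being the file written by `file.".kube/local"` above):

$ kubeswitch local
Switched kube context to local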
123 hosts/common/configs/user/console/kubernetes/remove-splash.patch Normal file
@@ -0,0 +1,123 @@
diff --git a/internal/ui/splash.go b/internal/ui/splash.go
index bfe58e46..21683c53 100644
--- a/internal/ui/splash.go
+++ b/internal/ui/splash.go
@@ -3,14 +3,6 @@

 package ui

-import (
- "fmt"
- "strings"
-
- "github.com/derailed/k9s/internal/config"
- "github.com/derailed/tview"
-)
-
 // LogoSmall K9s small log.
 var LogoSmall = []string{
 ` ____ __.________ `,
@@ -30,42 +22,3 @@ var LogoBig = []string{
 `|____|__ \ /____//____ >\______ /_______ \___|`,
 ` \/ \/ \/ \/ `,
 }
-
-// Splash represents a splash screen.
-type Splash struct {
- *tview.Flex
-}
-
-// NewSplash instantiates a new splash screen with product and company info.
-func NewSplash(styles *config.Styles, version string) *Splash {
- s := Splash{Flex: tview.NewFlex()}
- s.SetBackgroundColor(styles.BgColor())
-
- logo := tview.NewTextView()
- logo.SetDynamicColors(true)
- logo.SetTextAlign(tview.AlignCenter)
- s.layoutLogo(logo, styles)
-
- vers := tview.NewTextView()
- vers.SetDynamicColors(true)
- vers.SetTextAlign(tview.AlignCenter)
- s.layoutRev(vers, version, styles)
-
- s.SetDirection(tview.FlexRow)
- s.AddItem(logo, 10, 1, false)
- s.AddItem(vers, 1, 1, false)
-
- return &s
-}
-
-func (s *Splash) layoutLogo(t *tview.TextView, styles *config.Styles) {
- logo := strings.Join(LogoBig, fmt.Sprintf("\n[%s::b]", styles.Body().LogoColor))
- fmt.Fprintf(t, "%s[%s::b]%s\n",
-  strings.Repeat("\n", 2),
-  styles.Body().LogoColor,
-  logo)
-}
-
-func (s *Splash) layoutRev(t *tview.TextView, rev string, styles *config.Styles) {
- fmt.Fprintf(t, "[%s::b]Revision [red::b]%s", styles.Body().FgColor, rev)
-}
diff --git a/internal/ui/splash_test.go b/internal/ui/splash_test.go
deleted file mode 100644
index 69b4b50d..00000000
--- a/internal/ui/splash_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0
-// Copyright Authors of K9s
-
-package ui_test
-
-import (
- "testing"
-
- "github.com/derailed/k9s/internal/config"
- "github.com/derailed/k9s/internal/ui"
- "github.com/stretchr/testify/assert"
-)
-
-func TestNewSplash(t *testing.T) {
- s := ui.NewSplash(config.NewStyles(), "bozo")
-
- x, y, w, h := s.GetRect()
- assert.Equal(t, 0, x)
- assert.Equal(t, 0, y)
- assert.Equal(t, 15, w)
- assert.Equal(t, 10, h)
-}
diff --git a/internal/view/app.go b/internal/view/app.go
index 4ac7e7c2..2b3a3fc5 100644
--- a/internal/view/app.go
+++ b/internal/view/app.go
@@ -35,7 +35,6 @@ import (
 var ExitStatus = ""

 const (
- splashDelay = 1 * time.Second
 clusterRefresh = 15 * time.Second
 clusterInfoWidth = 50
 clusterInfoPad = 15
@@ -165,8 +164,7 @@ func (a *App) layout(ctx context.Context) {
 }
 main.AddItem(flash, 1, 1, false)

- a.Main.AddPage("main", main, true, false)
- a.Main.AddPage("splash", ui.NewSplash(a.Styles, a.version), true, true)
+ a.Main.AddPage("main", main, true, true)
 a.toggleHeader(!a.Config.K9s.IsHeadless(), !a.Config.K9s.IsLogoless())
 }

@@ -520,10 +518,7 @@ func (a *App) Run() error {
 a.Resume()

 go func() {
- <-time.After(splashDelay)
 a.QueueUpdateDraw(func() {
- a.Main.SwitchToPage("main")
- // if command bar is already active, focus it
 if a.CmdBuff().IsActive() {
 a.SetFocus(a.Prompt())
 }
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ pkgs, ... }:
{
  home-manager.users.${user}.dconf.settings = {
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ pkgs, ... }:
{
  home-manager.users.${user}.home.packages = with pkgs; [ lsof ];
@@ -1,5 +0,0 @@
{ user, home }:
{ pkgs, ... }:
{
  home-manager.users.${user}.home.packages = with pkgs; [ mprocs ];
}
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ pkgs, ... }:
{
  home-manager.users.${user} = {
@@ -1,47 +0,0 @@
{ user, home }:
{ lib, pkgs, ... }:
{
  # FIXME: https://github.com/hrkfdn/ncspot/issues/1681#issuecomment-3186274719
  nixpkgs.overlays = [
    (final: prev: {
      ncspot = prev.ncspot.overrideAttrs (oldAttrs: rec {
        src = pkgs.fetchFromGitHub {
          owner = "hrkfdn";
          repo = "ncspot";
          rev = "aac67d631f25bbc79f509d34aa85e6daff954830";
          hash = "sha256-B6BA1ksfDEySZH6gzkU5khOzwXAmeHbMHsx3sXd9lbs=";
        };

        cargoDeps = pkgs.rustPlatform.fetchCargoVendor {
          inherit src;
          hash = "sha256-HrQJiIzSvu/vR03UdnCcU6TGToBDKKDC6XscjvX3KPE=";
        };
      });
    })
  ];

  environment.persistence = {
    "/persist/state"."${home}/.config/ncspot/userstate.cbor" = { };
    "/persist/cache"."${home}/.cache/ncspot" = { };
  };

  home-manager.users.${user} = {
    programs.ncspot.enable = true;

    theme = {
      template.".config/ncspot/config.toml".source = ./theme.toml;

      reloadExtraConfig = "${
        lib.meta.getExe (
          pkgs.writeShellApplication {
            name = "reload-ncspot";
            runtimeInputs = with pkgs; [ netcat ];
            text = ''
              printf "reload\n" | nc -W 1 -U "''${XDG_RUNTIME_DIR:-/run/user/$UID}/ncspot/ncspot.sock"
            '';
          }
        )
      } &";
    };
  };
}
@@ -1,26 +0,0 @@
use_nerdfont = true
volnorm = true
default_keybindings = true
library_tabs = [ "albums", "artists", "playlists", "browse" ]

[keybindings]
"Esc" = "back"

[theme]
background = "{{colors.surface.default.hex}}"
primary = "{{colors.on_surface.default.hex}}"
secondary = "{{colors.inverse_surface.default.hex}}"
title = "{{colors.primary.default.hex}}"
playing = "{{colors.primary.default.hex}}"
playing_bg = "{{colors.surface.default.hex}}"
highlight = "{{colors.on_primary.default.hex}}"
highlight_bg = "{{colors.primary.default.hex}}"
playing_selected = "{{colors.on_primary.default.hex}}"
error = "{{colors.on_error.default.hex}}"
error_bg = "{{colors.error.default.hex}}"
statusbar = "{{colors.primary.default.hex}}"
statusbar_progress = "{{colors.primary.default.hex}}"
statusbar_bg = "{{colors.surface.default.hex}}"
cmdline = "{{colors.on_surface.default.hex}}"
cmdline_bg = "{{colors.surface.default.hex}}"
search_match = "{{colors.tertiary.default.hex}}"
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ ... }:
{
  home-manager.users.${user}.programs = {
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ ... }:
{
  home-manager.users.${user}.programs.zsh.shellAliases.ncl = "sudo nix-cleanup";
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{
  lib,
  inputs,
@@ -9,7 +12,7 @@
  home-manager.users.${user}.programs.zsh = {
    shellAliases.nd = "nix-develop";

    initContent =
    initExtra =
      let
        devShells = lib.strings.concatStringsSep " " (
          lib.attrsets.mapAttrsToList (key: _: key) inputs.self.devShells.${system}
@@ -32,16 +35,7 @@
          done

          if [[ -z "$devshell" ]]; then
            if [ ! -f flake.nix ]; then
              cp "${./template.nix}" flake.nix
              chmod 755 flake.nix
            fi

            if [ ! -f treefmt.nix ]; then
              cp "${./treefmt.nix}" treefmt.nix
              chmod 755 treefmt.nix
            fi

            if [ ! -f flake.nix ]; then cp "${./template.nix}" flake.nix; fi
            nix develop -c "$SHELL"
          else
            nix develop self#"$devshell" -c "$SHELL"
@@ -1,31 +1,30 @@
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    nixpkgs = {
      type = "github";
      owner = "karaolidis";
      repo = "nixpkgs";
      ref = "integration";
    };

    treefmt-nix = {
      url = "github:numtide/treefmt-nix";
      inputs.nixpkgs.follows = "nixpkgs";
    flake-utils = {
      url = "github:numtide/flake-utils";
    };
  };

  outputs =
    inputs:
    (
    { nixpkgs, ... }@inputs:
    inputs.flake-utils.lib.eachDefaultSystem (
      system:
      let
        system = "x86_64-linux";

        pkgs = import inputs.nixpkgs {
          inherit system;
          config.allowUnfree = true;
        };

        treefmt = inputs.treefmt-nix.lib.evalModule pkgs ./treefmt.nix;
        pkgs = nixpkgs.legacyPackages.${system};
      in
      {
        devShells.${system}.default = pkgs.mkShell { packages = with pkgs; [ ]; };
        devShells.default = pkgs.mkShell {
          packages = [ ];
        };

        formatter.${system} = treefmt.config.build.wrapper;
        checks.formatting.${system} = treefmt.config.build.check inputs.self;
        formatter = pkgs.nixfmt-rfc-style;
      }
    );
}
@@ -1,13 +0,0 @@
{ ... }:
{
  projectRootFile = "flake.nix";

  programs = {
    nixfmt = {
      enable = true;
      strict = true;
    };
  };

  settings.global.excludes = [ ".envrc" ];
}
@@ -1,4 +1,7 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{
  lib,
  pkgs,
@@ -7,19 +10,83 @@
  ...
}:
{
  home-manager.users.${user}.programs = {
    direnv = {
      enable = true;
      silent = true;
      nix-direnv.enable = true;
      enableZshIntegration = true;

      config = {
        global.warn_timeout = 0;
  home-manager.users.${user} = {
    programs = {
      direnv = {
        enable = true;
        silent = true;
        nix-direnv.enable = true;
        enableZshIntegration = true;
      };

      # https://github.com/direnv/direnv/wiki/Customizing-cache-location
      stdlib = ''
      zsh = {
        shellAliases.nde = "nix-direnv";

        initExtra =
          let
            devShells = lib.strings.concatStringsSep " " (
              lib.attrsets.mapAttrsToList (key: _: key) inputs.self.devShells.${system}
            );
          in
          ''
            nix-direnv() {
              local devshell=""
              local hide=false

              while getopts "s:h" opt; do
                case $opt in
                  s)
                    devshell=$OPTARG
                    ;;
                  h)
                    hide=true
                    ;;
                  *)
                    echo "Usage: nix-direnv [-s <devshell>] [-h]"
                    return 1
                    ;;
                esac
              done

              if [[ -z "$devshell" ]]; then
                echo "use flake" > .envrc
                if [ ! -f flake.nix ]; then cp "${../nix-develop/template.nix}" flake.nix; fi
              else
                echo "use flake self#$devshell" > .envrc
              fi

              if "$hide" && git rev-parse --is-inside-work-tree &>/dev/null; then
                local top
                top=$(git rev-parse --show-toplevel)
                if ! grep -q "^\.envrc$" "$top/.gitignore" "$top/.git/info/exclude"; then echo "$(realpath --relative-to="$top" .envrc)" >> "$top/.git/info/exclude"; fi
                if [ -z "$devshell" ]; then
                  if ! grep -q "^flake.nix$" "$top/.gitignore" "$top/.git/info/exclude"; then echo "flake.nix" >> "$top/.git/info/exclude"; fi
                  if ! grep -q "^flake.lock$" "$top/.gitignore" "$top/.git/info/exclude"; then echo "flake.lock" >> "$top/.git/info/exclude"; fi
                fi
              fi

              direnv allow
            }

            _nix-direnv_completion() {
              local options=(
                '-s[Dev shell from root flake]:shell:(${devShells})'
                '-h[Hide .envrc and flake.nix in git]'
              )

              _arguments -s $options
            }

            compdef _nix-direnv_completion nix-direnv
          '';

        p10k.extraRightPromptElements = [ "direnv" ];
      };
    };

    # https://github.com/direnv/direnv/wiki/Customizing-cache-location
    xdg.configFile = {
      "direnv/direnvrc".text = ''
        declare -A direnv_layout_dirs
        direnv_layout_dir() {
          local hash path
@@ -30,89 +97,17 @@
          )}"
        }
      '';
    };

    zsh = {
      shellAliases.nde = "nix-direnv";

      initContent =
        let
          devShells = lib.strings.concatStringsSep " " (
            lib.attrsets.mapAttrsToList (key: _: key) inputs.self.devShells.${system}
          );
        in
        ''
          nix-direnv() {
            local devshell=""
            local hide=false

            while getopts "s:h" opt; do
              case $opt in
                s)
                  devshell="$OPTARG"
                  ;;
                h)
                  hide=true
                  ;;
                *)
                  echo "Usage: nix-direnv [-s <devshell>] [-h]"
                  return 1
                  ;;
              esac
            done

            if [[ -z "$devshell" ]]; then
              if "$hide"; then
                echo "use flake path:." > .envrc;
              else
                echo "use flake" > .envrc;
              fi

              if [ ! -f flake.nix ]; then
                cp "${../nix-develop/template.nix}" flake.nix
                chmod 755 flake.nix
              fi

              if [ ! -f treefmt.nix ]; then
                cp "${../nix-develop/treefmt.nix}" treefmt.nix
                chmod 755 treefmt.nix
              fi
            else
              echo "use flake self#$devshell" > .envrc
            fi

            if "$hide" && git rev-parse --is-inside-work-tree &>/dev/null; then
              local top
              top="$(git rev-parse --show-toplevel)"
              if ! grep -q "^\.envrc$" "$top/.gitignore" "$top/.git/info/exclude"; then echo "$(realpath --relative-to="$top" .envrc)" >> "$top/.git/info/exclude"; fi
              if [ -z "$devshell" ]; then
                if ! grep -q "^flake.nix$" "$top/.gitignore" "$top/.git/info/exclude"; then echo "flake.nix" >> "$top/.git/info/exclude"; fi
                if ! grep -q "^flake.lock$" "$top/.gitignore" "$top/.git/info/exclude"; then echo "flake.lock" >> "$top/.git/info/exclude"; fi
                if ! grep -q "^treefmt.nix$" "$top/.gitignore" "$top/.git/info/exclude"; then echo "treefmt.nix" >> "$top/.git/info/exclude"; fi
              fi
            fi

            direnv allow
          }

          _nix-direnv_completion() {
            local options=(
              '-s[Dev shell from root flake]:shell:(${devShells})'
              '-h[Hide .envrc and flake.nix in git]'
            )

            _arguments -s $options
          }

          compdef _nix-direnv_completion nix-direnv
        '';

      p10k.extraRightPromptElements = [ "direnv" ];
      "direnv/direnv.toml".source = (
        (pkgs.formats.toml { }).generate "direnv.toml" {
          global.warn_timeout = 0;
        }
      );
    };
  };

  environment.persistence = {
    "/persist/state"."${home}/.local/share/direnv/allow" = { };
    "/persist/cache"."${home}/.cache/direnv" = { };
    "/persist"."${home}/.local/share/direnv/allow" = { };
    "/cache"."${home}/.cache/direnv" = { };
  };
}
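One subtlety in the `path:.` variant of the helper above: flake commands inside a git work tree only see files git tracks, so a `flake.nix` hidden via `.git/info/exclude` would break a plain `use flake`; writing `use flake path:.` forces direnv to evaluate the directory as a path flake, sidestepping the git filter. A sketch of the `.envrc` each branch produces (`web` is a hypothetical devshell name):

# nix-direnv -h, inside a git repository:
use flake path:.

# nix-direnv -s web:
use flake self#web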
@@ -1,14 +1,13 @@
{ user, home }:
{ pkgs, ... }:
{
  environment.persistence."/persist/cache"."${home}/.cache/nix" = { };
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ ... }:
{
  environment.persistence."/cache"."${home}/.cache/nix" = { };

  home-manager.users.${user} = {
    home.packages = with pkgs; [ nurl ];

    programs.zsh.shellAliases = {
      nrs = "sudo nixos-rebuild switch --flake .#$(hostname) --show-trace";
      nrb = "sudo nixos-rebuild boot --flake .#$(hostname) --show-trace";
    };
  home-manager.users.${user}.programs.zsh.shellAliases = {
    nrs = "sudo nixos-rebuild switch --flake .#$(hostname) --show-trace";
    nrb = "sudo nixos-rebuild boot --flake .#$(hostname) --show-trace";
  };
}
10 hosts/common/configs/user/console/nixpkgs/default.nix Normal file
@@ -0,0 +1,10 @@
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ inputs, ... }:
{
  home-manager.users.${user} = {
    imports = [ inputs.nur.modules.homeManager.default ];
  };
}
@@ -1,5 +0,0 @@
{ user, home }:
{ pkgs, ... }:
{
  home-manager.users.${user}.home.packages = with pkgs; [ ouch ];
}
@@ -1,17 +1,19 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ config, pkgs, ... }:
{
  environment.persistence."/persist/state"."${home}/.local/state/wireplumber" = { };
  environment.persistence."/persist"."${home}/.local/state/wireplumber" = { };

  systemd.user.services.wireplumber.after = [
    config.environment.persistence."/persist/state"."${home}/.local/state/wireplumber".mount
    config.environment.persistence."/persist"."${home}/.local/state/wireplumber".mount
  ];

  home-manager.users.${user} = {
    home.packages = with pkgs; [
      wireplumber
      playerctl
      easyeffects
    ];

    services.playerctld.enable = true;
@@ -1,33 +0,0 @@
{ user, home }:
{
  lib,
  pkgs,
  inputs,
  ...
}:
{
  environment.persistence."/persist/state"."${home}/.local/share/containers".create = "directory";

  home-manager.users.${user} = {
    imports = [ inputs.quadlet-nix.homeManagerModules.quadlet ];

    services.podman = {
      enable = true;
      settings.storage.storage.driver = "btrfs";
    };

    virtualisation.quadlet = {
      enable = true;
      autoEscape = true;
    };

    home = {
      packages = with pkgs; [
        podman-compose
        kompose
      ];

      sessionVariables.REGISTRY_AUTH_FILE = "${home}/.config/containers/auth.json";
    };
  };
}
21 hosts/common/configs/user/console/ranger/default.nix Normal file
@@ -0,0 +1,21 @@
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ ... }:
{
  environment.persistence."/cache"."${home}/.cache/ranger" = { };

  home-manager.users.${user}.programs = {
    ranger = {
      enable = true;

      settings = {
        preview_images = true;
        preview_images_method = "kitty";
      };
    };

    zsh.p10k.extraRightPromptElements = [ "ranger" ];
  };
}
@@ -1,14 +1,17 @@
{ user, home }:
{
  user ? throw "user argument is required",
  home ? throw "home argument is required",
}:
{ config, inputs, ... }:
{
  environment.persistence."/persist/state"."${home}/.config/sops-nix/key.txt" = { };
  environment.persistence."/persist"."${home}/.config/sops-nix/key.txt" = { };

  home-manager.users.${user} = {
    imports = [ inputs.sops-nix.homeManagerModules.sops ];

    sops.age.keyFile =
      config.environment.persistence."/persist/state"."${home}/.config/sops-nix/key.txt".source;
      config.environment.persistence."/persist"."${home}/.config/sops-nix/key.txt".source;
    home.sessionVariables.SOPS_AGE_KEY_FILE =
      config.environment.persistence."/persist/state"."${home}/.config/sops-nix/key.txt".source;
      config.environment.persistence."/persist"."${home}/.config/sops-nix/key.txt".source;
  };
}
@@ -1,8 +0,0 @@
{ user, home }:
{ ... }:
{
  home-manager.users.${user} = {
    services.ssh-agent.enable = true;
    programs.ssh.addKeysToAgent = "yes";
  };
}
Some files were not shown because too many files have changed in this diff.