Add kubernetes

Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
This commit is contained in:
2025-01-28 12:38:08 +00:00
parent 7dbe22034a
commit 79e804f8bf
12 changed files with 1244 additions and 63 deletions

64
flake.lock generated
View File

@@ -2,7 +2,9 @@
"nodes": {
"ags": {
"inputs": {
"astal": "astal",
"astal": [
"astal"
],
"nixpkgs": [
"nixpkgs"
]
@@ -22,27 +24,6 @@
}
},
"astal": {
"inputs": {
"nixpkgs": [
"ags",
"nixpkgs"
]
},
"locked": {
"lastModified": 1735172721,
"narHash": "sha256-rtEAwGsHSppnkR3Qg3eRJ6Xh/F84IY9CrBBLzYabalY=",
"owner": "aylur",
"repo": "astal",
"rev": "6c84b64efc736e039a8a10774a4a1bf772c37aa2",
"type": "github"
},
"original": {
"owner": "aylur",
"repo": "astal",
"type": "github"
}
},
"astal_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
@@ -121,7 +102,9 @@
},
"flake-utils": {
"inputs": {
"systems": "systems"
"systems": [
"systems"
]
},
"locked": {
"lastModified": 1731533236,
@@ -160,11 +143,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1736505015,
"narHash": "sha256-bY3JTStgCgUZa6cE1GAc+c9ZCExCGvpjmPb7ANanhsc=",
"lastModified": 1738059769,
"narHash": "sha256-SBOwc5HSi0zThWoj3EfYh673X1d1dc78N2qCtcJmIvo=",
"owner": "karaolidis",
"repo": "nixpkgs",
"rev": "43ed29dceb72a444d29ec4b0b980deae63ea9791",
"rev": "befe9d27e7e7be485aae35d541f135c8471bd508",
"type": "github"
},
"original": {
@@ -174,26 +157,12 @@
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1736344531,
"narHash": "sha256-8YVQ9ZbSfuUk2bUf2KRj60NRraLPKPS0Q4QFTbc+c2c=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "bffc22eb12172e6db3c5dde9e3e5628f8e3e7912",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nur": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_2",
"nixpkgs": [
"nixpkgs"
],
"treefmt-nix": "treefmt-nix"
},
"locked": {
@@ -213,14 +182,15 @@
"root": {
"inputs": {
"ags": "ags",
"astal": "astal_2",
"astal": "astal",
"disko": "disko",
"flake-utils": "flake-utils",
"home-manager": "home-manager",
"nixpkgs": "nixpkgs",
"nur": "nur",
"sops-nix": "sops-nix",
"spicetify-nix": "spicetify-nix"
"spicetify-nix": "spicetify-nix",
"systems": "systems"
}
},
"sops-nix": {
@@ -248,6 +218,9 @@
"flake-compat": "flake-compat",
"nixpkgs": [
"nixpkgs"
],
"systems": [
"systems"
]
},
"locked": {
@@ -275,6 +248,7 @@
},
"original": {
"owner": "nix-systems",
"ref": "main",
"repo": "default",
"type": "github"
}

View File

@@ -41,8 +41,21 @@
inputs.nixpkgs.follows = "nixpkgs";
};
systems = {
type = "github";
owner = "nix-systems";
repo = "default";
ref = "main";
};
nur = {
url = "github:nix-community/NUR";
inputs.nixpkgs.follows = "nixpkgs";
};
flake-utils = {
url = "github:numtide/flake-utils";
inputs.systems.follows = "systems";
};
astal = {
@@ -52,16 +65,18 @@
ags = {
url = "github:aylur/ags";
inputs.nixpkgs.follows = "nixpkgs";
inputs = {
nixpkgs.follows = "nixpkgs";
astal.follows = "astal";
};
};
spicetify-nix = {
url = "github:Gerg-L/spicetify-nix";
inputs.nixpkgs.follows = "nixpkgs";
};
flake-utils = {
url = "github:numtide/flake-utils";
inputs = {
nixpkgs.follows = "nixpkgs";
systems.follows = "systems";
};
};
};

View File

@@ -0,0 +1,212 @@
# Bootstrap addon manifests: RBAC enabling kubelet TLS bootstrapping plus the
# kubelet-csr-approver deployment that approves kubelet serving CSRs.
{ config, ... }:
let
  rbacApiVersion = "rbac.authorization.k8s.io/v1";
  rbacGroup = "rbac.authorization.k8s.io";
  # Subject shared by the two bootstrapper bindings below.
  bootstrappersGroup = {
    kind = "Group";
    name = "system:bootstrappers";
    apiGroup = rbacGroup;
  };
in
{
  # Let bootstrapping nodes submit CSRs.
  bootstrap-node-bootstrapper-crb = {
    apiVersion = rbacApiVersion;
    kind = "ClusterRoleBinding";
    metadata.name = "create-csrs-for-bootstrapping";
    subjects = [ bootstrappersGroup ];
    roleRef = {
      kind = "ClusterRole";
      name = "system:node-bootstrapper";
      apiGroup = rbacGroup;
    };
  };

  # Auto-approve client CSRs from the bootstrappers group.
  bootstrap-csr-nodeclient-crb = {
    apiVersion = rbacApiVersion;
    kind = "ClusterRoleBinding";
    metadata.name = "auto-approve-csrs-for-group";
    subjects = [ bootstrappersGroup ];
    roleRef = {
      kind = "ClusterRole";
      name = "system:certificates.k8s.io:certificatesigningrequests:nodeclient";
      apiGroup = rbacGroup;
    };
  };

  # Auto-approve client-certificate renewals from existing nodes.
  bootstrap-csr-selfnodeclient-crb = {
    apiVersion = rbacApiVersion;
    kind = "ClusterRoleBinding";
    metadata.name = "auto-approve-renewals-for-nodes";
    subjects = [
      {
        kind = "Group";
        name = "system:nodes";
        apiGroup = rbacGroup;
      }
    ];
    roleRef = {
      kind = "ClusterRole";
      name = "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient";
      apiGroup = rbacGroup;
    };
  };

  # Permissions required by the kubelet-csr-approver controller.
  csr-approver-cr = {
    apiVersion = rbacApiVersion;
    kind = "ClusterRole";
    metadata.name = "kubelet-csr-approver";
    rules = [
      {
        apiGroups = [ "certificates.k8s.io" ];
        resources = [ "certificatesigningrequests" ];
        verbs = [
          "get"
          "list"
          "watch"
        ];
      }
      {
        apiGroups = [ "coordination.k8s.io" ];
        resources = [ "leases" ];
        verbs = [
          "create"
          "get"
          "update"
        ];
      }
      {
        apiGroups = [ "certificates.k8s.io" ];
        resources = [ "certificatesigningrequests/approval" ];
        verbs = [ "update" ];
      }
      {
        apiGroups = [ "certificates.k8s.io" ];
        resourceNames = [ "kubernetes.io/kubelet-serving" ];
        resources = [ "signers" ];
        verbs = [ "approve" ];
      }
      {
        apiGroups = [ "" ];
        resources = [ "events" ];
        verbs = [ "create" ];
      }
    ];
  };

  csr-approver-crb = {
    apiVersion = rbacApiVersion;
    kind = "ClusterRoleBinding";
    metadata = {
      name = "kubelet-csr-approver";
      namespace = "kube-system";
    };
    roleRef = {
      apiGroup = rbacGroup;
      kind = "ClusterRole";
      name = "kubelet-csr-approver";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "kubelet-csr-approver";
        namespace = "kube-system";
      }
    ];
  };

  csr-approver-sa = {
    apiVersion = "v1";
    kind = "ServiceAccount";
    metadata = {
      name = "kubelet-csr-approver";
      namespace = "kube-system";
    };
  };

  csr-approver-d = {
    apiVersion = "apps/v1";
    kind = "Deployment";
    metadata = {
      name = "kubelet-csr-approver";
      namespace = "kube-system";
    };
    spec = {
      replicas = 1;
      selector.matchLabels.app = "kubelet-csr-approver";
      template = {
        metadata.labels.app = "kubelet-csr-approver";
        spec = {
          serviceAccountName = "kubelet-csr-approver";
          containers = [
            {
              name = "kubelet-csr-approver";
              # NOTE(review): floating `latest` tag — consider pinning a
              # specific release for reproducibility.
              image = "postfinance/kubelet-csr-approver:latest";
              args = [
                "-metrics-bind-address"
                ":8080"
                "-health-probe-bind-address"
                ":8081"
              ];
              livenessProbe.httpGet = {
                path = "/healthz";
                port = 8081;
              };
              resources.requests = {
                cpu = "100m";
                memory = "200Mi";
              };
              env = [
                # Regex anchored to this node's kubelet hostname.
                {
                  name = "PROVIDER_REGEX";
                  value = "^${config.services.kubernetes.kubelet.hostname}$";
                }
                {
                  name = "PROVIDER_IP_PREFIXES";
                  value = "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.0/8,169.254.0.0/16,::1/128,fe80::/10,fc00::/7";
                }
                {
                  name = "MAX_EXPIRATION_SEC";
                  value = "31622400";
                }
                {
                  name = "BYPASS_DNS_RESOLUTION";
                  value = "true";
                }
              ];
            }
          ];
          # Allow scheduling on the (tainted) control-plane node.
          tolerations = [
            {
              effect = "NoSchedule";
              key = "node-role.kubernetes.io/control-plane";
              operator = "Equal";
            }
          ];
        };
      };
    };
  };
}

View File

@@ -0,0 +1,7 @@
{ config, lib, ... }:
let
  # Each subdirectory exports a function returning an attrset of manifests.
  bootstrapManifests = import ./bootstrap { inherit config; };
  metricsServerManifests = import ./metrics-server { };
in
{
  # Manifests applied once by the addon manager's bootstrap phase.
  services.kubernetes.addonManager.bootstrapAddons = lib.mkMerge [
    bootstrapManifests
    metricsServerManifests
  ];
}

View File

@@ -0,0 +1,297 @@
# metrics-server addon manifests (deploys the v0.7.2 upstream image),
# expressed as Nix attrsets for the addon manager.
{ ... }:
let
  # Common app label applied to every object.
  labels = {
    k8s-app = "metrics-server";
  };
in
{
  metrics-server-sa = {
    apiVersion = "v1";
    kind = "ServiceAccount";
    metadata = {
      inherit labels;
      name = "metrics-server";
      namespace = "kube-system";
    };
  };

  # Aggregated role so admin/edit/view automatically include metrics access.
  metrics-server-metrics-reader-cr = {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRole";
    metadata = {
      labels = labels // {
        "rbac.authorization.k8s.io/aggregate-to-admin" = "true";
        "rbac.authorization.k8s.io/aggregate-to-edit" = "true";
        "rbac.authorization.k8s.io/aggregate-to-view" = "true";
      };
      name = "system:aggregated-metrics-reader";
    };
    rules = [
      {
        apiGroups = [ "metrics.k8s.io" ];
        resources = [
          "pods"
          "nodes"
        ];
        verbs = [
          "get"
          "list"
          "watch"
        ];
      }
    ];
  };

  metrics-server-cr = {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRole";
    metadata = {
      inherit labels;
      name = "system:metrics-server";
    };
    rules = [
      {
        apiGroups = [ "" ];
        resources = [ "nodes/metrics" ];
        verbs = [ "get" ];
      }
      {
        apiGroups = [ "" ];
        resources = [
          "pods"
          "nodes"
        ];
        verbs = [
          "get"
          "list"
          "watch"
        ];
      }
    ];
  };

  metrics-server-rb = {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "RoleBinding";
    metadata = {
      inherit labels;
      name = "metrics-server-auth-reader";
      namespace = "kube-system";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "Role";
      name = "extension-apiserver-authentication-reader";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "metrics-server";
        namespace = "kube-system";
      }
    ];
  };

  metrics-server-auth-delegator-crb = {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      inherit labels;
      name = "metrics-server:system:auth-delegator";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:auth-delegator";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "metrics-server";
        namespace = "kube-system";
      }
    ];
  };

  metrics-server-crb = {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      inherit labels;
      name = "system:metrics-server";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:metrics-server";
    };
    subjects = [
      {
        kind = "ServiceAccount";
        name = "metrics-server";
        namespace = "kube-system";
      }
    ];
  };

  metrics-server-s = {
    apiVersion = "v1";
    kind = "Service";
    metadata = {
      inherit labels;
      name = "metrics-server";
      namespace = "kube-system";
    };
    spec = {
      ports = [
        {
          name = "https";
          port = 443;
          protocol = "TCP";
          targetPort = "https";
        }
      ];
      selector = labels;
    };
  };

  metrics-server-d = {
    apiVersion = "apps/v1";
    kind = "Deployment";
    metadata = {
      inherit labels;
      name = "metrics-server";
      namespace = "kube-system";
    };
    spec = {
      selector.matchLabels = labels;
      strategy.rollingUpdate.maxUnavailable = 0;
      template = {
        metadata.labels = labels;
        spec = {
          containers = [
            {
              args = [
                "--cert-dir=/tmp"
                "--secure-port=10250"
                "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"
                "--kubelet-use-node-status-port"
                "--metric-resolution=15s"
              ];
              image = "registry.k8s.io/metrics-server/metrics-server:v0.7.2";
              imagePullPolicy = "IfNotPresent";
              livenessProbe = {
                failureThreshold = 3;
                httpGet = {
                  path = "/livez";
                  port = "https";
                  scheme = "HTTPS";
                };
                periodSeconds = 10;
              };
              name = "metrics-server";
              ports = [
                {
                  containerPort = 10250;
                  name = "https";
                  protocol = "TCP";
                }
              ];
              readinessProbe = {
                failureThreshold = 3;
                httpGet = {
                  path = "/readyz";
                  port = "https";
                  scheme = "HTTPS";
                };
                initialDelaySeconds = 20;
                periodSeconds = 10;
              };
              resources.requests = {
                cpu = "100m";
                memory = "200Mi";
              };
              securityContext = {
                allowPrivilegeEscalation = false;
                capabilities.drop = [ "ALL" ];
                readOnlyRootFilesystem = true;
                runAsNonRoot = true;
                runAsUser = 1000;
                seccompProfile.type = "RuntimeDefault";
              };
              volumeMounts = [
                {
                  mountPath = "/tmp";
                  name = "tmp-dir";
                }
              ];
            }
          ];
          nodeSelector."kubernetes.io/os" = "linux";
          priorityClassName = "system-cluster-critical";
          serviceAccountName = "metrics-server";
          volumes = [
            {
              emptyDir = { };
              name = "tmp-dir";
            }
          ];
        };
      };
    };
  };

  # Register v1beta1.metrics.k8s.io with the aggregation layer.
  metrics-server-apis = {
    apiVersion = "apiregistration.k8s.io/v1";
    kind = "APIService";
    metadata = {
      inherit labels;
      name = "v1beta1.metrics.k8s.io";
    };
    spec = {
      group = "metrics.k8s.io";
      groupPriorityMinimum = 100;
      # NOTE(review): TLS verification of the backing service is skipped —
      # confirm this trade-off is intended (no caBundle is provided).
      insecureSkipTLSVerify = true;
      service = {
        name = "metrics-server";
        namespace = "kube-system";
      };
      version = "v1beta1";
      versionPriority = 100;
    };
  };
}

View File

@@ -0,0 +1,233 @@
{
config,
lib,
pkgs,
...
}:
# Single-node Kubernetes (control plane + worker) with all PKI material
# supplied via sops-nix instead of easyCerts.
let
# Cluster-admin kubeconfig built from the sops-provisioned admin client
# certificate; linked at /etc/kubeconfig and reused by kube-addon-manager's
# preStart below.
adminKubeconfig = config.services.kubernetes.lib.mkKubeConfig "admin" {
caFile = config.sops.secrets."kubernetes/ca/crt".path;
keyFile = config.sops.secrets."kubernetes/accounts/admin/key".path;
certFile = config.sops.secrets."kubernetes/accounts/admin/crt".path;
server = config.services.kubernetes.apiserverAddress;
};
in
{
imports = [
./addons
./secrets
];
environment = {
# Impermanence: keep container images and cluster state across reboots.
persistence."/persist" = {
"/var/lib/containerd" = { };
"/var/lib/kubernetes" = { };
"/var/lib/kubelet" = { };
"/var/lib/etcd" = { };
};
etc."kubeconfig".source = adminKubeconfig;
systemPackages = with pkgs; [ kubectl ];
};
services = {
kubernetes = {
# This machine is both control plane and worker.
roles = [
"master"
"node"
];
masterAddress = "localhost";
# Certificates are managed externally via sops, not auto-generated.
easyCerts = false;
caFile = config.sops.secrets."kubernetes/ca/crt".path;
addonManager.enable = true;
apiserver = {
allowPrivileged = true;
clientCaFile = config.sops.secrets."kubernetes/ca/crt".path;
kubeletClientCaFile = config.sops.secrets."kubernetes/ca/crt".path;
tlsKeyFile = config.sops.secrets."kubernetes/apiserver/cert/key".path;
tlsCertFile = config.sops.secrets."kubernetes/apiserver/cert/crt".path;
kubeletClientKeyFile = config.sops.secrets."kubernetes/apiserver/kubelet-client/key".path;
kubeletClientCertFile = config.sops.secrets."kubernetes/apiserver/kubelet-client/crt".path;
proxyClientKeyFile = config.sops.secrets."kubernetes/front-proxy/client/key".path;
proxyClientCertFile = config.sops.secrets."kubernetes/front-proxy/client/crt".path;
serviceAccountSigningKeyFile = config.sops.secrets."kubernetes/sa/key".path;
serviceAccountKeyFile = config.sops.secrets."kubernetes/sa/pub".path;
# Bootstrap-token auth enables kubelet TLS bootstrapping; the
# requestheader (front-proxy) flags enable the aggregation layer used
# by the metrics-server addon.
extraOpts = lib.strings.concatStringsSep " " [
"--enable-bootstrap-token-auth=true"
"--token-auth-file=${config.sops.secrets."kubernetes/accounts/kubelet-bootstrap/csv".path}"
"--requestheader-client-ca-file=${config.sops.secrets."kubernetes/front-proxy/ca/crt".path}"
"--requestheader-allowed-names=front-proxy-client"
"--requestheader-extra-headers-prefix=X-Remote-Extra-"
"--requestheader-group-headers=X-Remote-Group"
"--requestheader-username-headers=X-Remote-User"
];
etcd = {
servers = [ "https://etcd.local:2379" ];
caFile = config.sops.secrets."kubernetes/etcd/ca/crt".path;
keyFile = config.sops.secrets."kubernetes/apiserver/etcd-client/key".path;
certFile = config.sops.secrets."kubernetes/apiserver/etcd-client/crt".path;
};
};
controllerManager = {
rootCaFile = config.sops.secrets."kubernetes/ca/crt".path;
serviceAccountKeyFile = config.sops.secrets."kubernetes/sa/key".path;
# cluster-signing-* lets the controller manager sign kubelet CSRs during
# bootstrap and certificate rotation.
extraOpts = lib.strings.concatStringsSep " " [
"--client-ca-file=${config.sops.secrets."kubernetes/ca/crt".path}"
"--cluster-signing-cert-file=${config.sops.secrets."kubernetes/ca/crt".path}"
"--cluster-signing-key-file=${config.sops.secrets."kubernetes/ca/key".path}"
"--requestheader-client-ca-file=${config.sops.secrets."kubernetes/front-proxy/ca/crt".path}"
];
kubeconfig = {
caFile = config.sops.secrets."kubernetes/ca/crt".path;
keyFile = config.sops.secrets."kubernetes/accounts/controller-manager/key".path;
certFile = config.sops.secrets."kubernetes/accounts/controller-manager/crt".path;
};
};
kubelet = {
clientCaFile = config.sops.secrets."kubernetes/ca/crt".path;
# The kubelet obtains and rotates its own certificates via TLS bootstrap
# using the bootstrap-kubeconfig rendered in the kubelet preStart below.
extraOpts = lib.strings.concatStringsSep " " [
"--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubeconfig"
"--kubeconfig=/var/lib/kubelet/kubeconfig"
"--cert-dir=/var/lib/kubelet"
];
extraConfig = {
failSwapOn = false;
rotateCertificates = true;
serverTLSBootstrap = true;
memorySwap.swapBehavior = "LimitedSwap";
};
featureGates = {
RotateKubeletServerCertificate = true;
NodeSwap = true;
};
};
proxy.kubeconfig = {
caFile = config.sops.secrets."kubernetes/ca/crt".path;
keyFile = config.sops.secrets."kubernetes/accounts/proxy/key".path;
certFile = config.sops.secrets."kubernetes/accounts/proxy/crt".path;
};
scheduler.kubeconfig = {
caFile = config.sops.secrets."kubernetes/ca/crt".path;
keyFile = config.sops.secrets."kubernetes/accounts/scheduler/key".path;
certFile = config.sops.secrets."kubernetes/accounts/scheduler/crt".path;
};
};
# etcd listens on loopback only; etcd.local resolves to 127.0.0.1 through
# networking.extraHosts below.
etcd = {
keyFile = config.sops.secrets."kubernetes/etcd/server/key".path;
certFile = config.sops.secrets."kubernetes/etcd/server/crt".path;
peerKeyFile = config.sops.secrets."kubernetes/etcd/peer/key".path;
peerCertFile = config.sops.secrets."kubernetes/etcd/peer/crt".path;
trustedCaFile = config.sops.secrets."kubernetes/etcd/ca/crt".path;
peerTrustedCaFile = config.sops.secrets."kubernetes/etcd/ca/crt".path;
listenClientUrls = [ "https://127.0.0.1:2379" ];
listenPeerUrls = [ "https://127.0.0.1:2380" ];
advertiseClientUrls = [ "https://etcd.local:2379" ];
initialCluster = [ "${config.services.kubernetes.masterAddress}=https://etcd.local:2380" ];
initialAdvertisePeerUrls = [ "https://etcd.local:2380" ];
};
flannel.kubeconfig = config.services.kubernetes.lib.mkKubeConfig "flannel" {
caFile = config.sops.secrets."kubernetes/ca/crt".path;
keyFile = config.sops.secrets."kubernetes/accounts/flannel/key".path;
certFile = config.sops.secrets."kubernetes/accounts/flannel/crt".path;
server = config.services.kubernetes.apiserverAddress;
};
};
networking = {
# NOTE(review): the firewall is disabled entirely instead of opening the
# specific cluster ports — confirm this is acceptable for this host.
firewall.enable = false;
extraHosts = lib.strings.optionalString (config.services.etcd.enable) ''
127.0.0.1 etcd.${config.services.kubernetes.addons.dns.clusterDomain} etcd.local
'';
};
# Ordering: every unit that reads sops secrets or persisted state must start
# after sops-nix.service and the relevant persistence mount unit.
systemd.services = {
kube-addon-manager = {
after = [
"sops-nix.service"
config.environment.persistence."/persist"."/var/lib/kubernetes".mount
];
environment.KUBECONFIG = config.services.kubernetes.lib.mkKubeConfig "addon-manager" {
caFile = config.sops.secrets."kubernetes/ca/crt".path;
keyFile = config.sops.secrets."kubernetes/accounts/addon-manager/key".path;
certFile = config.sops.secrets."kubernetes/accounts/addon-manager/crt".path;
server = config.services.kubernetes.apiserverAddress;
};
serviceConfig.PermissionsStartOnly = true;
# Apply every bootstrapAddons manifest (serialized to JSON in the store)
# with admin credentials before the addon manager starts.
preStart = ''
export KUBECONFIG=${adminKubeconfig}
${config.services.kubernetes.package}/bin/kubectl apply -f ${
lib.strings.concatStringsSep " \\\n -f " (
lib.attrsets.mapAttrsToList (
n: v: pkgs.writeText "${n}.json" (builtins.toJSON v)
) config.services.kubernetes.addonManager.bootstrapAddons
)
}
'';
};
kubelet = {
# Render the TLS-bootstrap kubeconfig at runtime so the bootstrap token
# is read from the sops secret path rather than embedded in the store.
preStart = ''
mkdir -p /etc/kubernetes
cat > /etc/kubernetes/bootstrap-kubeconfig <<EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: ${config.sops.secrets."kubernetes/ca/crt".path}
server: ${config.services.kubernetes.apiserverAddress}
name: local
contexts:
- context:
cluster: local
user: kubelet-bootstrap
name: bootstrap
current-context: bootstrap
preferences: {}
users:
- name: kubelet-bootstrap
user:
token: $(<${config.sops.secrets."kubernetes/accounts/kubelet-bootstrap/token".path})
EOF
'';
after = [
"sops-nix.service"
config.environment.persistence."/persist"."/var/lib/kubelet".mount
];
};
kube-apiserver.after = [
"sops-nix.service"
config.environment.persistence."/persist"."/var/lib/kubernetes".mount
];
etcd.after = [
"sops-nix.service"
config.environment.persistence."/persist"."/var/lib/etcd".mount
];
kube-controller-manager.after = [ "sops-nix.service" ];
kube-proxy.after = [ "sops-nix.service" ];
kube-scheduler.after = [ "sops-nix.service" ];
flannel.after = [ "sops-nix.service" ];
};
}

View File

@@ -0,0 +1,204 @@
# sops-nix declarations for every Kubernetes/etcd certificate, key and token
# consumed by the kubernetes module.
{ ... }:
let
  # Nearly all secrets follow the same 0440 pattern; only owner/group vary.
  secret = owner: group: {
    inherit owner group;
    mode = "0440";
  };
  kubeOwned = secret "kubernetes" "kubernetes";
  etcdOwned = secret "etcd" "kubernetes";
in
{
  sops.secrets = {
    # The cluster CA is readable by the "users" group (presumably so per-user
    # kubeconfigs can reference it — TODO confirm).
    "kubernetes/ca/crt" = secret "kubernetes" "users";
    "kubernetes/ca/key" = secret "kubernetes" "users";
    "kubernetes/front-proxy/ca/crt" = kubeOwned;
    "kubernetes/front-proxy/ca/key" = kubeOwned;
    "kubernetes/etcd/ca/crt" = etcdOwned;
    "kubernetes/etcd/ca/key" = etcdOwned;
    "kubernetes/apiserver/cert/crt" = kubeOwned;
    "kubernetes/apiserver/cert/key" = kubeOwned;
    "kubernetes/apiserver/kubelet-client/crt" = kubeOwned;
    "kubernetes/apiserver/kubelet-client/key" = kubeOwned;
    "kubernetes/apiserver/etcd-client/crt" = kubeOwned;
    "kubernetes/apiserver/etcd-client/key" = kubeOwned;
    "kubernetes/front-proxy/client/crt" = kubeOwned;
    "kubernetes/front-proxy/client/key" = kubeOwned;
    "kubernetes/etcd/server/crt" = etcdOwned;
    "kubernetes/etcd/server/key" = etcdOwned;
    "kubernetes/etcd/peer/crt" = etcdOwned;
    "kubernetes/etcd/peer/key" = etcdOwned;
    "kubernetes/sa/key" = kubeOwned;
    "kubernetes/sa/pub" = kubeOwned;
    # Admin credentials set only the group, relying on sops-nix defaults for
    # owner and mode. NOTE(review): inconsistent with the entries above —
    # confirm this is intentional.
    "kubernetes/accounts/admin/crt" = {
      group = "kubernetes";
    };
    "kubernetes/accounts/admin/key" = {
      group = "kubernetes";
    };
    "kubernetes/accounts/controller-manager/crt" = kubeOwned;
    "kubernetes/accounts/controller-manager/key" = kubeOwned;
    "kubernetes/accounts/addon-manager/crt" = kubeOwned;
    "kubernetes/accounts/addon-manager/key" = kubeOwned;
    "kubernetes/accounts/scheduler/crt" = kubeOwned;
    "kubernetes/accounts/scheduler/key" = kubeOwned;
    "kubernetes/accounts/proxy/crt" = kubeOwned;
    "kubernetes/accounts/proxy/key" = kubeOwned;
    "kubernetes/accounts/flannel/crt" = kubeOwned;
    "kubernetes/accounts/flannel/key" = kubeOwned;
    "kubernetes/accounts/kubelet-bootstrap/token" = kubeOwned;
    "kubernetes/accounts/kubelet-bootstrap/csv" = kubeOwned;
  };
}

View File

@@ -0,0 +1,210 @@
#!/usr/bin/env -S nix shell nixpkgs#openssl nixpkgs#yq-go nixpkgs#sops -c bash
set -o errexit
set -o pipefail
# Generate a self-signed CA: ${target_dir}/${ca_name}.key and .crt.
#   $1 target_dir  $2 ca_name  $3 validity in days  $4 subject CN
generate_ca() {
  local target_dir=$1
  local ca_name=$2
  local ca_days=$3
  local cn=$4
  mkdir -p "${target_dir}"
  local ca_key=${target_dir}/${ca_name}.key
  local ca_cert=${target_dir}/${ca_name}.crt
  openssl genrsa -out "${ca_key}" 2048
  openssl req -x509 -new -nodes -key "${ca_key}" -days "${ca_days}" -out "${ca_cert}" -subj "/CN=${cn}"
}

# Print an openssl [alt_names] section body for the given hosts: IPv4
# literals become IP.N entries, everything else DNS.N entries.
generate_alt_names() {
  local hosts=("$@")
  local dns=0
  local ip=0
  local alt_names=""
  for host in "${hosts[@]}"; do
    if [[ ${host} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      alt_names="${alt_names}IP.${ip} = ${host}\n"
      # Plain assignment, NOT ((ip++)): the arithmetic command returns
      # status 1 when the pre-increment value is 0, which aborts the
      # enclosing $(...) subshell under `set -o errexit` and truncates
      # the alt_names output.
      ip=$((ip + 1))
    else
      alt_names="${alt_names}DNS.${dns} = ${host}\n"
      dns=$((dns + 1))
    fi
  done
  echo -e "${alt_names}"
}

# Write ${target_dir}/${cnf_name}.cnf with SAN entries for ${hosts[@]}.
# $3 (cn) is accepted for call-site compatibility but is not used here.
generate_cnf() {
  local target_dir=$1
  local cnf_name=$2
  local cn=$3
  local hosts=("${@:4}")
  mkdir -p "${target_dir}"
  local cnf_file=${target_dir}/${cnf_name}.cnf
  cat <<EOF > "${cnf_file}"
[req]
prompt = no
[ req_ext ]
subjectAltName = @alt_names
[ alt_names ]
$(generate_alt_names "${hosts[@]}")
[ v3_ext ]
authorityKeyIdentifier=keyid,issuer:always
basicConstraints=CA:FALSE
keyUsage=keyEncipherment,dataEncipherment,digitalSignature
extendedKeyUsage=serverAuth,clientAuth
subjectAltName=@alt_names
EOF
}

# Issue a CA-signed certificate:
#   $1 target_dir  $2 cert_name  $3 days  $4 CN  $5 O (may be empty)
#   $6 ca_key  $7 ca_cert  $8.. SAN hosts ("" means no SANs)
generate_crt() {
  local target_dir=$1
  local cert_name=$2
  local cert_days=$3
  local cn=$4
  local o=$5
  local ca_key=$6
  local ca_cert=$7
  local hosts=("${@:8}")
  mkdir -p "${target_dir}"
  local cert_key=${target_dir}/${cert_name}.key
  local cert_csr=${target_dir}/${cert_name}.csr
  local cert_cert=${target_dir}/${cert_name}.crt
  openssl genrsa -out "${cert_key}" 2048
  local subject="/CN=${cn}"
  if [[ -n "${o}" ]]; then
    subject="${subject}/O=${o}"
  fi
  # Call sites pass a single "" to mean "no SANs", so test the first element
  # explicitly instead of the old unquoted `[ -n "${hosts}" ]` (which only
  # ever looked at element 0 anyway and broke with `set -o nounset`).
  if [[ -n "${hosts[0]:-}" ]]; then
    generate_cnf "${target_dir}" "${cert_name}" "${cn}" "${hosts[@]}"
    openssl req -new -key "${cert_key}" -out "${cert_csr}" -subj "${subject}" -config "${target_dir}"/"${cert_name}".cnf
    openssl x509 -req -in "${cert_csr}" -CA "${ca_cert}" -CAkey "${ca_key}" -CAcreateserial -out "${cert_cert}" -days "${cert_days}" -extfile "${target_dir}"/"${cert_name}".cnf -extensions v3_ext
  else
    openssl req -new -key "${cert_key}" -out "${cert_csr}" -subj "${subject}"
    openssl x509 -req -in "${cert_csr}" -CA "${ca_cert}" -CAkey "${ca_key}" -CAcreateserial -out "${cert_cert}" -days "${cert_days}"
  fi
}

# RSA keypair for service-account token signing: .key (private) / .pub.
generate_key_pair() {
  local target_dir=$1
  local key_name=$2
  mkdir -p "${target_dir}"
  local private_key=${target_dir}/${key_name}.key
  local public_key=${target_dir}/${key_name}.pub
  openssl genrsa -out "${private_key}" 2048
  openssl rsa -in "${private_key}" -pubout -out "${public_key}"
}

# Random bootstrap token plus the matching --token-auth-file CSV line.
generate_auth_token() {
  local target_dir=$1
  local token_name=$2
  local user=$3
  local id=$4
  local groups=$5
  mkdir -p "${target_dir}"
  local token_file="${target_dir}/${token_name}.token"
  local token_auth_file="${target_dir}/${token_name}.csv"
  # `local` added so the token is not leaked into the global scope.
  local token
  token="$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')"
  echo "${token}" > "${token_file}"
  echo "${token},${user},${id},\"${groups}\"" > "${token_auth_file}"
}
DEFAULT_CA_DAYS=3650

# The regenerated secrets are re-encrypted with sops, which needs the age key.
if [[ -z "${SOPS_AGE_KEY_FILE:-}" ]]; then
  echo "Please set the SOPS_AGE_KEY_FILE environment variable"
  exit 1
fi

# Target host directory; defaults to the local machine's hostname.
hostname=${1:-$(hostname)}
if [[ -z "${hostname}" ]]; then
  echo "Usage: $0 [hostname]"
  exit 1
fi

# Cluster, front-proxy and etcd CAs.
generate_ca out ca "${DEFAULT_CA_DAYS}" kubernetes-ca
generate_ca out/front-proxy ca "${DEFAULT_CA_DAYS}" kubernetes-front-proxy-ca
generate_ca out/etcd ca "${DEFAULT_CA_DAYS}" etcd-ca

# Server and client certificates. A trailing "" argument means "no SANs".
generate_crt out/apiserver cert "${DEFAULT_CA_DAYS}" kube-apiserver "" out/ca.key out/ca.crt "kubernetes" "kubernetes.default" "kubernetes.default.svc" "kubernetes.default.svc.cluster" "kubernetes.default.svc.cluster.local" "localhost" "10.0.0.1" "127.0.0.1"
generate_crt out/apiserver kubelet-client "${DEFAULT_CA_DAYS}" kube-apiserver-kubelet-client system:masters out/ca.key out/ca.crt ""
generate_crt out/apiserver etcd-client "${DEFAULT_CA_DAYS}" kube-apiserver-etcd-client "" out/etcd/ca.key out/etcd/ca.crt ""
generate_crt out/front-proxy client "${DEFAULT_CA_DAYS}" front-proxy-client "" out/front-proxy/ca.key out/front-proxy/ca.crt ""
generate_crt out/etcd server "${DEFAULT_CA_DAYS}" kube-etcd "" out/etcd/ca.key out/etcd/ca.crt "etcd.local" "etcd.cluster.local" "localhost" "127.0.0.1"
generate_crt out/etcd peer "${DEFAULT_CA_DAYS}" kube-etcd-peer "" out/etcd/ca.key out/etcd/ca.crt "etcd.local" "etcd.cluster.local" "localhost" "127.0.0.1"

# Service-account token signing keypair.
generate_key_pair out sa

# Per-component client certificates.
generate_crt out/accounts admin "${DEFAULT_CA_DAYS}" kubernetes-admin system:masters out/ca.key out/ca.crt ""
generate_crt out/accounts users "${DEFAULT_CA_DAYS}" kubernetes-users system:masters out/ca.key out/ca.crt ""
generate_crt out/accounts controller-manager "${DEFAULT_CA_DAYS}" system:kube-controller-manager "" out/ca.key out/ca.crt ""
generate_crt out/accounts addon-manager "${DEFAULT_CA_DAYS}" system:kube-addon-manager "" out/ca.key out/ca.crt ""
generate_crt out/accounts scheduler "${DEFAULT_CA_DAYS}" system:kube-scheduler "" out/ca.key out/ca.crt ""
generate_crt out/accounts proxy "${DEFAULT_CA_DAYS}" system:kube-proxy "" out/ca.key out/ca.crt ""
generate_crt out/accounts flannel "${DEFAULT_CA_DAYS}" flannel-client "" out/ca.key out/ca.crt ""

# Kubelet TLS bootstrap token plus the apiserver --token-auth-file CSV.
generate_auth_token out/accounts kubelet-bootstrap "kubelet-bootstrap" 10001 "system:bootstrappers"

# BUG FIX: these paths previously re-interpolated $(hostname), silently
# ignoring the optional hostname argument parsed and validated above.
sops_config="../../../../../${hostname}/secrets/sops.yaml"
secrets_file="../../../../../${hostname}/secrets/secrets.yaml"
decrypted_secrets_file="../../../../../${hostname}/secrets/.decrypted~secrets.yaml"

# Decrypt, splice every generated artifact under the .kubernetes key, then
# re-encrypt in place.
sops -d "${secrets_file}" > "${decrypted_secrets_file}"
yq -i '
del(.kubernetes) |
.kubernetes.ca.crt = load_str("out/ca.crt") |
.kubernetes.ca.key = load_str("out/ca.key") |
.kubernetes.front-proxy.ca.crt = load_str("out/front-proxy/ca.crt") |
.kubernetes.front-proxy.ca.key = load_str("out/front-proxy/ca.key") |
.kubernetes.etcd.ca.crt = load_str("out/etcd/ca.crt") |
.kubernetes.etcd.ca.key = load_str("out/etcd/ca.key") |
.kubernetes.apiserver.cert.crt = load_str("out/apiserver/cert.crt") |
.kubernetes.apiserver.cert.key = load_str("out/apiserver/cert.key") |
.kubernetes.apiserver.kubelet-client.crt = load_str("out/apiserver/kubelet-client.crt") |
.kubernetes.apiserver.kubelet-client.key = load_str("out/apiserver/kubelet-client.key") |
.kubernetes.apiserver.etcd-client.crt = load_str("out/apiserver/etcd-client.crt") |
.kubernetes.apiserver.etcd-client.key = load_str("out/apiserver/etcd-client.key") |
.kubernetes.front-proxy.client.crt = load_str("out/front-proxy/client.crt") |
.kubernetes.front-proxy.client.key = load_str("out/front-proxy/client.key") |
.kubernetes.etcd.server.crt = load_str("out/etcd/server.crt") |
.kubernetes.etcd.server.key = load_str("out/etcd/server.key") |
.kubernetes.etcd.peer.crt = load_str("out/etcd/peer.crt") |
.kubernetes.etcd.peer.key = load_str("out/etcd/peer.key") |
.kubernetes.sa.key = load_str("out/sa.key") |
.kubernetes.sa.pub = load_str("out/sa.pub") |
.kubernetes.accounts.admin.crt = load_str("out/accounts/admin.crt") |
.kubernetes.accounts.admin.key = load_str("out/accounts/admin.key") |
.kubernetes.accounts.users.crt = load_str("out/accounts/users.crt") |
.kubernetes.accounts.users.key = load_str("out/accounts/users.key") |
.kubernetes.accounts.controller-manager.crt = load_str("out/accounts/controller-manager.crt") |
.kubernetes.accounts.controller-manager.key = load_str("out/accounts/controller-manager.key") |
.kubernetes.accounts.addon-manager.crt = load_str("out/accounts/addon-manager.crt") |
.kubernetes.accounts.addon-manager.key = load_str("out/accounts/addon-manager.key") |
.kubernetes.accounts.scheduler.crt = load_str("out/accounts/scheduler.crt") |
.kubernetes.accounts.scheduler.key = load_str("out/accounts/scheduler.key") |
.kubernetes.accounts.proxy.crt = load_str("out/accounts/proxy.crt") |
.kubernetes.accounts.proxy.key = load_str("out/accounts/proxy.key") |
.kubernetes.accounts.flannel.crt = load_str("out/accounts/flannel.crt") |
.kubernetes.accounts.flannel.key = load_str("out/accounts/flannel.key") |
.kubernetes.accounts.kubelet-bootstrap.token = load_str("out/accounts/kubelet-bootstrap.token") |
.kubernetes.accounts.kubelet-bootstrap.csv = load_str("out/accounts/kubelet-bootstrap.csv")
' "${decrypted_secrets_file}"
sops --config "${sops_config}" -e "${decrypted_secrets_file}" > "${secrets_file}"

# Remove the plaintext key material.
rm -rf out

View File

@@ -2,7 +2,12 @@
user ? throw "user argument is required",
home ? throw "home argument is required",
}:
{ pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
{
nixpkgs.overlays = [
(final: prev: {
@@ -17,13 +22,38 @@
"/cache"."${home}/.kube/cache" = { };
};
users.users.${user}.extraGroups = [ "kubernetes" ];
sops.secrets = {
"kubernetes/accounts/${user}/crt" = {
key = "kubernetes/accounts/users/crt";
group = "users";
mode = "0440";
};
"kubernetes/accounts/${user}/key" = {
key = "kubernetes/accounts/users/key";
group = "users";
mode = "0440";
};
};
home-manager.users.${user} = {
home.packages = with pkgs; [
kubectl
kubernetes-helm
kustomize
kind
];
home = {
packages = with pkgs; [
kubectl
kustomize
kubernetes-helm
kompose
];
file.".kube/local".source = config.services.kubernetes.lib.mkKubeConfig user {
caFile = config.sops.secrets."kubernetes/ca/crt".path;
certFile = config.sops.secrets."kubernetes/accounts/${user}/crt".path;
keyFile = config.sops.secrets."kubernetes/accounts/${user}/key".path;
server = config.services.kubernetes.apiserverAddress;
};
};
programs = {
k9s = {

View File

@@ -24,6 +24,7 @@
../common/configs/system/git
../common/configs/system/gpg-agent
../common/configs/system/impermanence
../common/configs/system/kubernetes
../common/configs/system/neovim
../common/configs/system/networking
../common/configs/system/nix

View File

@@ -4,13 +4,11 @@ set -o errexit
set -o nounset
set -o pipefail
if [[ "$#" -ne 1 ]]; then
echo "Usage: $0 <sops-master-key>"
if [[ -z "$SOPS_AGE_KEY_FILE" ]]; then
echo "Please set the SOPS_AGE_KEY_FILE environment variable"
exit 1
fi
export SOPS_AGE_KEY_FILE="$1"
find . -type f -name 'sops.yaml' | while IFS= read -r sops_file; do
dir=$(dirname "$sops_file")
echo "$dir"