Remove kubernetes
Fuck this arcane wizardry cluster bollocks piece of crap.

Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
flake.lock | 6 (generated)
@@ -143,11 +143,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1738150270,
-        "narHash": "sha256-GkH7I9LW0aFklGc3YxjaBW7TtJy5aWHE0rPBUuz35Hk=",
+        "lastModified": 1736505015,
+        "narHash": "sha256-bY3JTStgCgUZa6cE1GAc+c9ZCExCGvpjmPb7ANanhsc=",
         "owner": "karaolidis",
         "repo": "nixpkgs",
-        "rev": "e8e18ef6309d021fa600f5aa2665963d8cf76ab7",
+        "rev": "43ed29dceb72a444d29ec4b0b980deae63ea9791",
         "type": "github"
       },
       "original": {
@@ -1,52 +0,0 @@
{
  config,
  pkgs,
  ...
}:
{
  imports = [
    ./options
    ./secrets
  ];

  environment = {
    persistence."/persist" = {
      "/var/lib/containerd" = { };
      "/var/lib/kubernetes" = { };
      "/var/lib/kubelet" = { };
      "/var/lib/etcd" = { };
    };

    etc."kubeconfig".source = config.services.kubernetes.kubeconfigs.admin;
    systemPackages = with pkgs; [ kubectl ];
  };

  services = {
    kubernetes = {
      enable = true;

      roles = [
        "master"
        "node"
      ];
    };
  };

  systemd.services = {
    kube-addon-manager.after = [
      config.environment.persistence."/persist"."/var/lib/kubernetes".mount
    ];

    kubelet.after = [
      config.environment.persistence."/persist"."/var/lib/kubelet".mount
    ];

    kube-apiserver.after = [
      config.environment.persistence."/persist"."/var/lib/kubernetes".mount
    ];

    etcd.after = [
      config.environment.persistence."/persist"."/var/lib/etcd".mount
    ];
  };
}
@@ -1,70 +0,0 @@
{ ... }:
[
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "Role";
    metadata = {
      name = "system:kube-addon-manager";
      namespace = "kube-system";
    };
    rules = [
      {
        apiGroups = [ "*" ];
        resources = [ "*" ];
        verbs = [ "*" ];
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "RoleBinding";
    metadata = {
      name = "system:kube-addon-manager";
      namespace = "kube-system";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "Role";
      name = "system:kube-addon-manager";
    };
    subjects = [
      {
        apiGroup = "rbac.authorization.k8s.io";
        kind = "User";
        name = "system:kube-addon-manager";
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRole";
    metadata = {
      name = "system:kube-addon-manager:cluster-lister";
    };
    rules = [
      {
        apiGroups = [ "*" ];
        resources = [ "*" ];
        verbs = [ "list" ];
      }
    ];
  }
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "system:kube-addon-manager:cluster-lister";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:kube-addon-manager:cluster-lister";
    };
    subjects = [
      {
        kind = "User";
        name = "system:kube-addon-manager";
      }
    ];
  }
]
@@ -1,206 +0,0 @@
|
||||
{ config, ... }:
|
||||
[
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
name = "create-csrs-for-bootstrapping";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "Group";
|
||||
name = "system:bootstrappers";
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
}
|
||||
];
|
||||
roleRef = {
|
||||
kind = "ClusterRole";
|
||||
name = "system:node-bootstrapper";
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
};
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
name = "auto-approve-csrs-for-group";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "Group";
|
||||
name = "system:bootstrappers";
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
}
|
||||
];
|
||||
roleRef = {
|
||||
kind = "ClusterRole";
|
||||
name = "system:certificates.k8s.io:certificatesigningrequests:nodeclient";
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
};
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
name = "auto-approve-renewals-for-nodes";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "Group";
|
||||
name = "system:nodes";
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
}
|
||||
];
|
||||
roleRef = {
|
||||
kind = "ClusterRole";
|
||||
name = "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient";
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
};
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRole";
|
||||
metadata = {
|
||||
name = "kubelet-csr-approver";
|
||||
};
|
||||
rules = [
|
||||
{
|
||||
apiGroups = [ "certificates.k8s.io" ];
|
||||
resources = [ "certificatesigningrequests" ];
|
||||
verbs = [
|
||||
"get"
|
||||
"list"
|
||||
"watch"
|
||||
];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "coordination.k8s.io" ];
|
||||
resources = [ "leases" ];
|
||||
verbs = [
|
||||
"create"
|
||||
"get"
|
||||
"update"
|
||||
];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "certificates.k8s.io" ];
|
||||
resources = [ "certificatesigningrequests/approval" ];
|
||||
verbs = [ "update" ];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "certificates.k8s.io" ];
|
||||
resourceNames = [ "kubernetes.io/kubelet-serving" ];
|
||||
resources = [ "signers" ];
|
||||
verbs = [ "approve" ];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "" ];
|
||||
resources = [ "events" ];
|
||||
verbs = [ "create" ];
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
name = "kubelet-csr-approver";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "ClusterRole";
|
||||
name = "kubelet-csr-approver";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "ServiceAccount";
|
||||
name = "kubelet-csr-approver";
|
||||
namespace = "kube-system";
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
apiVersion = "v1";
|
||||
kind = "ServiceAccount";
|
||||
metadata = {
|
||||
name = "kubelet-csr-approver";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
}
|
||||
{
|
||||
apiVersion = "apps/v1";
|
||||
kind = "Deployment";
|
||||
metadata = {
|
||||
name = "kubelet-csr-approver";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
spec = {
|
||||
replicas = 1;
|
||||
selector = {
|
||||
matchLabels = {
|
||||
app = "kubelet-csr-approver";
|
||||
};
|
||||
};
|
||||
template = {
|
||||
metadata = {
|
||||
labels = {
|
||||
app = "kubelet-csr-approver";
|
||||
};
|
||||
};
|
||||
spec = {
|
||||
serviceAccountName = "kubelet-csr-approver";
|
||||
containers = [
|
||||
{
|
||||
name = "kubelet-csr-approver";
|
||||
image = "postfinance/kubelet-csr-approver:latest";
|
||||
args = [
|
||||
"-metrics-bind-address"
|
||||
":8080"
|
||||
"-health-probe-bind-address"
|
||||
":8081"
|
||||
];
|
||||
livenessProbe = {
|
||||
httpGet = {
|
||||
path = "/healthz";
|
||||
port = 8081;
|
||||
};
|
||||
};
|
||||
resources = {
|
||||
requests = {
|
||||
cpu = "100m";
|
||||
memory = "200Mi";
|
||||
};
|
||||
};
|
||||
env = [
|
||||
{
|
||||
name = "PROVIDER_REGEX";
|
||||
value = "^${config.networking.fqdnOrHostName}$";
|
||||
}
|
||||
{
|
||||
name = "PROVIDER_IP_PREFIXES";
|
||||
value = "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.0/8,169.254.0.0/16,::1/128,fe80::/10,fc00::/7";
|
||||
}
|
||||
{
|
||||
name = "MAX_EXPIRATION_SEC";
|
||||
value = "31622400";
|
||||
}
|
||||
{
|
||||
name = "BYPASS_DNS_RESOLUTION";
|
||||
value = "true";
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
tolerations = [
|
||||
{
|
||||
effect = "NoSchedule";
|
||||
key = "node-role.kubernetes.io/control-plane";
|
||||
operator = "Equal";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
]
|
@@ -1,21 +0,0 @@
{ ... }:
[
  {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "ClusterRoleBinding";
    metadata = {
      name = "system:kube-apiserver:kubelet-api-admin";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "ClusterRole";
      name = "system:kubelet-api-admin";
    };
    subjects = [
      {
        kind = "User";
        name = "system:kube-apiserver";
      }
    ];
  }
]
@@ -1,289 +0,0 @@
|
||||
{ ... }:
|
||||
[
|
||||
{
|
||||
apiVersion = "v1";
|
||||
kind = "ServiceAccount";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "metrics-server";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRole";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
"rbac.authorization.k8s.io/aggregate-to-admin" = "true";
|
||||
"rbac.authorization.k8s.io/aggregate-to-edit" = "true";
|
||||
"rbac.authorization.k8s.io/aggregate-to-view" = "true";
|
||||
};
|
||||
name = "system:aggregated-metrics-reader";
|
||||
};
|
||||
rules = [
|
||||
{
|
||||
apiGroups = [ "metrics.k8s.io" ];
|
||||
resources = [
|
||||
"pods"
|
||||
"nodes"
|
||||
];
|
||||
verbs = [
|
||||
"get"
|
||||
"list"
|
||||
"watch"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRole";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "system:metrics-server";
|
||||
};
|
||||
rules = [
|
||||
{
|
||||
apiGroups = [ "" ];
|
||||
resources = [ "nodes/metrics" ];
|
||||
verbs = [ "get" ];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "" ];
|
||||
resources = [
|
||||
"pods"
|
||||
"nodes"
|
||||
];
|
||||
verbs = [
|
||||
"get"
|
||||
"list"
|
||||
"watch"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "RoleBinding";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "metrics-server-auth-reader";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "Role";
|
||||
name = "extension-apiserver-authentication-reader";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "ServiceAccount";
|
||||
name = "metrics-server";
|
||||
namespace = "kube-system";
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "metrics-server:system:auth-delegator";
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "ClusterRole";
|
||||
name = "system:auth-delegator";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "ServiceAccount";
|
||||
name = "metrics-server";
|
||||
namespace = "kube-system";
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "system:metrics-server";
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "ClusterRole";
|
||||
name = "system:metrics-server";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "ServiceAccount";
|
||||
name = "metrics-server";
|
||||
namespace = "kube-system";
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
apiVersion = "v1";
|
||||
kind = "Service";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "metrics-server";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
spec = {
|
||||
ports = [
|
||||
{
|
||||
name = "https";
|
||||
port = 443;
|
||||
protocol = "TCP";
|
||||
targetPort = "https";
|
||||
}
|
||||
];
|
||||
selector = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
};
|
||||
}
|
||||
{
|
||||
apiVersion = "apps/v1";
|
||||
kind = "Deployment";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "metrics-server";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
spec = {
|
||||
selector = {
|
||||
matchLabels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
};
|
||||
strategy = {
|
||||
rollingUpdate = {
|
||||
maxUnavailable = 0;
|
||||
};
|
||||
};
|
||||
template = {
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
};
|
||||
spec = {
|
||||
containers = [
|
||||
{
|
||||
args = [
|
||||
"--cert-dir=/tmp"
|
||||
"--secure-port=10250"
|
||||
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"
|
||||
"--kubelet-use-node-status-port"
|
||||
"--metric-resolution=15s"
|
||||
];
|
||||
image = "registry.k8s.io/metrics-server/metrics-server:v0.7.2";
|
||||
imagePullPolicy = "IfNotPresent";
|
||||
livenessProbe = {
|
||||
failureThreshold = 3;
|
||||
httpGet = {
|
||||
path = "/livez";
|
||||
port = "https";
|
||||
scheme = "HTTPS";
|
||||
};
|
||||
periodSeconds = 10;
|
||||
};
|
||||
name = "metrics-server";
|
||||
ports = [
|
||||
{
|
||||
containerPort = 10250;
|
||||
name = "https";
|
||||
protocol = "TCP";
|
||||
}
|
||||
];
|
||||
readinessProbe = {
|
||||
failureThreshold = 3;
|
||||
httpGet = {
|
||||
path = "/readyz";
|
||||
port = "https";
|
||||
scheme = "HTTPS";
|
||||
};
|
||||
initialDelaySeconds = 20;
|
||||
periodSeconds = 10;
|
||||
};
|
||||
resources = {
|
||||
requests = {
|
||||
cpu = "100m";
|
||||
memory = "200Mi";
|
||||
};
|
||||
};
|
||||
securityContext = {
|
||||
allowPrivilegeEscalation = false;
|
||||
capabilities = {
|
||||
drop = [ "ALL" ];
|
||||
};
|
||||
readOnlyRootFilesystem = true;
|
||||
runAsNonRoot = true;
|
||||
runAsUser = 1000;
|
||||
seccompProfile = {
|
||||
type = "RuntimeDefault";
|
||||
};
|
||||
};
|
||||
volumeMounts = [
|
||||
{
|
||||
mountPath = "/tmp";
|
||||
name = "tmp-dir";
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
nodeSelector = {
|
||||
"kubernetes.io/os" = "linux";
|
||||
};
|
||||
priorityClassName = "system-cluster-critical";
|
||||
serviceAccountName = "metrics-server";
|
||||
volumes = [
|
||||
{
|
||||
emptyDir = { };
|
||||
name = "tmp-dir";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
{
|
||||
apiVersion = "apiregistration.k8s.io/v1";
|
||||
kind = "APIService";
|
||||
metadata = {
|
||||
labels = {
|
||||
k8s-app = "metrics-server";
|
||||
};
|
||||
name = "v1beta1.metrics.k8s.io";
|
||||
};
|
||||
spec = {
|
||||
group = "metrics.k8s.io";
|
||||
groupPriorityMinimum = 100;
|
||||
insecureSkipTLSVerify = true;
|
||||
service = {
|
||||
name = "metrics-server";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
version = "v1beta1";
|
||||
versionPriority = 100;
|
||||
};
|
||||
}
|
||||
]
|
@@ -1,757 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.services.kubernetes;
|
||||
in
|
||||
{
|
||||
options.services.kubernetes =
|
||||
with lib;
|
||||
with types;
|
||||
let
|
||||
mkCertOptions = name: {
|
||||
key = mkOption {
|
||||
description = "${name} key file.";
|
||||
type = path;
|
||||
};
|
||||
|
||||
crt = mkOption {
|
||||
description = "${name} certificate file.";
|
||||
type = path;
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
enable = mkEnableOption "kubernetes";
|
||||
|
||||
lib = mkOption {
|
||||
description = "Kubernetes utility functions.";
|
||||
type = raw;
|
||||
readOnly = true;
|
||||
default = {
|
||||
mkKubeConfig =
|
||||
name: ca: cert: key:
|
||||
(pkgs.formats.json { }).generate "${name}-kubeconfig.json" {
|
||||
apiVersion = "v1";
|
||||
kind = "Config";
|
||||
clusters = [
|
||||
{
|
||||
name = "local";
|
||||
cluster = {
|
||||
server = cfg.apiserver._address;
|
||||
"certificate-authority" = ca;
|
||||
};
|
||||
}
|
||||
];
|
||||
users = [
|
||||
{
|
||||
inherit name;
|
||||
user = {
|
||||
"client-certificate" = cert;
|
||||
"client-key" = key;
|
||||
};
|
||||
}
|
||||
];
|
||||
contexts = [
|
||||
{
|
||||
name = "local";
|
||||
context = {
|
||||
cluster = "local";
|
||||
user = name;
|
||||
};
|
||||
}
|
||||
];
|
||||
current-context = "local";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
roles = mkOption {
|
||||
description = "Kubernetes role that this machine should take.";
|
||||
type = listOf (enum [
|
||||
"master"
|
||||
"node"
|
||||
]);
|
||||
default = [
|
||||
"master"
|
||||
"node"
|
||||
];
|
||||
};
|
||||
|
||||
address = mkOption {
|
||||
description = "Kubernetes master server address.";
|
||||
type = str;
|
||||
default = "localhost";
|
||||
};
|
||||
|
||||
cidr = mkOption {
|
||||
description = "Kubernetes cluster CIDR.";
|
||||
type = str;
|
||||
default = "10.0.0.0/24";
|
||||
};
|
||||
|
||||
cas = {
|
||||
kubernetes = mkCertOptions "Kubernetes CA";
|
||||
frontProxy = mkCertOptions "Front Proxy CA";
|
||||
etcd = mkCertOptions "ETCD CA";
|
||||
};
|
||||
|
||||
certs = {
|
||||
apiserver = {
|
||||
server = mkCertOptions "Kubernetes API Server";
|
||||
kubeletClient = mkCertOptions "Kubernetes API Server Kubelet Client";
|
||||
etcdClient = mkCertOptions "Kubernetes API Server ETCD Client";
|
||||
};
|
||||
|
||||
etcd = {
|
||||
server = mkCertOptions "ETCD Server";
|
||||
peer = mkCertOptions "ETCD Peer";
|
||||
};
|
||||
|
||||
frontProxy = mkCertOptions "Front Proxy Client";
|
||||
|
||||
serviceAccount = {
|
||||
public = mkOption {
|
||||
description = "Service account public key file.";
|
||||
type = path;
|
||||
};
|
||||
|
||||
private = mkOption {
|
||||
description = "Service account private key file.";
|
||||
type = path;
|
||||
};
|
||||
};
|
||||
|
||||
accounts = {
|
||||
scheduler = mkCertOptions "Kubernetes Scheduler";
|
||||
controllerManager = mkCertOptions "Kubernetes Controller Manager";
|
||||
addonManager = mkCertOptions "Kubernetes Addon Manager";
|
||||
proxy = mkCertOptions "Kubernetes Proxy";
|
||||
admin = mkCertOptions "Kubernetes Admin";
|
||||
};
|
||||
};
|
||||
|
||||
kubeconfigs = mkOption {
|
||||
description = "Kubernetes kubeconfigs.";
|
||||
type = attrsOf path;
|
||||
default = { };
|
||||
};
|
||||
|
||||
apiserver = {
|
||||
_address = mkOption {
|
||||
description = "Kubernetes API server address.";
|
||||
internal = true;
|
||||
type = str;
|
||||
};
|
||||
|
||||
address = mkOption {
|
||||
description = "Kubernetes API server listening address.";
|
||||
type = str;
|
||||
readOnly = true;
|
||||
default = "0.0.0.0";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes API server listening port.";
|
||||
type = port;
|
||||
readOnly = true;
|
||||
default = 6443;
|
||||
};
|
||||
|
||||
bootstrapTokenFile = mkOption {
|
||||
description = "Kubernetes API server bootstrap token file.";
|
||||
type = path;
|
||||
};
|
||||
};
|
||||
|
||||
kubelet = {
|
||||
address = mkOption {
|
||||
description = "Kubernetes kubelet listening address.";
|
||||
type = str;
|
||||
readOnly = true;
|
||||
default = "0.0.0.0";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes kubelet listening port.";
|
||||
type = port;
|
||||
readOnly = true;
|
||||
default = 10250;
|
||||
};
|
||||
|
||||
taints =
|
||||
let
|
||||
taintOptions =
|
||||
{ name, ... }:
|
||||
{
|
||||
key = mkOption {
|
||||
description = "Taint key.";
|
||||
type = str;
|
||||
default = name;
|
||||
};
|
||||
|
||||
value = mkOption {
|
||||
description = "Taint value.";
|
||||
type = str;
|
||||
};
|
||||
|
||||
effect = mkOption {
|
||||
description = "Taint effect.";
|
||||
type = enum [
|
||||
"NoSchedule"
|
||||
"PreferNoSchedule"
|
||||
"NoExecute"
|
||||
];
|
||||
};
|
||||
};
|
||||
in
|
||||
mkOption {
|
||||
description = "Taints to apply to the node.";
|
||||
type = attrsOf (submodule taintOptions);
|
||||
default = { };
|
||||
};
|
||||
|
||||
bootstrapToken = mkOption {
|
||||
description = "Kubelet bootstrap token file.";
|
||||
type = path;
|
||||
};
|
||||
|
||||
seedImages = mkOption {
|
||||
description = "Container images to preload on the system.";
|
||||
type = listOf package;
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
cidr = mkOption {
|
||||
description = "Kubernetes pod CIDR.";
|
||||
type = str;
|
||||
default = "10.1.0.0/16";
|
||||
};
|
||||
};
|
||||
|
||||
scheduler = {
|
||||
address = mkOption {
|
||||
description = "Kubernetes scheduler listening address.";
|
||||
type = str;
|
||||
readOnly = true;
|
||||
default = "127.0.0.1";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes scheduler listening port.";
|
||||
type = port;
|
||||
readOnly = true;
|
||||
default = 10251;
|
||||
};
|
||||
};
|
||||
|
||||
controllerManager = {
|
||||
address = mkOption {
|
||||
description = "Kubernetes controller manager listening address.";
|
||||
type = str;
|
||||
readOnly = true;
|
||||
default = "127.0.0.1";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes controller manager listening port.";
|
||||
type = port;
|
||||
readOnly = true;
|
||||
default = 10252;
|
||||
};
|
||||
};
|
||||
|
||||
proxy = {
|
||||
address = mkOption {
|
||||
description = "Kubernetes proxy listening address.";
|
||||
type = str;
|
||||
readOnly = true;
|
||||
default = "0.0.0.0";
|
||||
};
|
||||
};
|
||||
|
||||
addonManager = {
|
||||
addons = mkOption {
|
||||
description = "Kubernetes addons.";
|
||||
type = attrsOf (coercedTo (attrs) (a: [ a ]) (listOf attrs));
|
||||
default = { };
|
||||
};
|
||||
|
||||
bootstrapAddons = mkOption {
|
||||
description = "Kubernetes addons applied with cluster-admin permissions.";
|
||||
type = attrsOf (coercedTo (attrs) (a: [ a ]) (listOf attrs));
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable (
|
||||
lib.mkMerge [
|
||||
# master or node
|
||||
{
|
||||
services.kubernetes = {
|
||||
apiserver._address = "https://${cfg.address}:${toString cfg.apiserver.port}";
|
||||
|
||||
kubeconfigs.admin =
|
||||
cfg.lib.mkKubeConfig "admin" cfg.cas.kubernetes.crt cfg.certs.accounts.admin.crt
|
||||
cfg.certs.accounts.admin.key;
|
||||
|
||||
addonManager.bootstrapAddons = {
|
||||
addonManager = import ./addons/addon-manager { };
|
||||
bootstrap = import ./addons/bootstrap { inherit config; };
|
||||
kubeletApiAdmin = import ./addons/kubelet-api-admin { };
|
||||
metricsServer = import ./addons/metrics-server { };
|
||||
};
|
||||
};
|
||||
|
||||
boot = {
|
||||
kernel.sysctl = {
|
||||
"net.bridge.bridge-nf-call-iptables" = 1;
|
||||
"net.ipv4.ip_forward" = 1;
|
||||
"net.bridge.bridge-nf-call-ip6tables" = 1;
|
||||
};
|
||||
|
||||
kernelModules = [
|
||||
"br_netfilter"
|
||||
"overlay"
|
||||
];
|
||||
};
|
||||
|
||||
users = {
|
||||
users.kubernetes = {
|
||||
uid = config.ids.uids.kubernetes;
|
||||
group = "kubernetes";
|
||||
home = "/var/lib/kubernetes";
|
||||
homeMode = "755";
|
||||
createHome = true;
|
||||
description = "Kubernetes user";
|
||||
};
|
||||
|
||||
groups.kubernetes.gid = config.ids.gids.kubernetes;
|
||||
};
|
||||
|
||||
systemd = {
|
||||
targets.kubernetes = {
|
||||
description = "Kubernetes";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
|
||||
tmpfiles.rules = [
|
||||
"d /opt/cni/bin 0755 root root -"
|
||||
"d /run/kubernetes 0755 kubernetes kubernetes -"
|
||||
];
|
||||
|
||||
services = {
|
||||
kubelet =
|
||||
let
|
||||
kubeletConfig = (pkgs.formats.json { }).generate "config.json" ({
|
||||
apiVersion = "kubelet.config.k8s.io/v1beta1";
|
||||
kind = "KubeletConfiguration";
|
||||
address = cfg.kubelet.address;
|
||||
port = cfg.kubelet.port;
|
||||
authentication = {
|
||||
x509.clientCAFile = cfg.cas.kubernetes.crt;
|
||||
webhook = {
|
||||
enabled = true;
|
||||
cacheTTL = "10s";
|
||||
};
|
||||
};
|
||||
authorization.mode = "Webhook";
|
||||
cgroupDriver = "systemd";
|
||||
hairpinMode = "hairpin-veth";
|
||||
registerNode = true;
|
||||
containerRuntimeEndpoint = "unix:///run/containerd/containerd.sock";
|
||||
failSwapOn = false;
|
||||
memorySwap.swapBehavior = "LimitedSwap";
|
||||
rotateCertificates = true;
|
||||
serverTLSBootstrap = true;
|
||||
featureGates = {
|
||||
RotateKubeletServerCertificate = true;
|
||||
NodeSwap = true;
|
||||
};
|
||||
healthzBindAddress = "127.0.0.1";
|
||||
healthzPort = 10248;
|
||||
});
|
||||
|
||||
taints = lib.strings.concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (
|
||||
lib.attrsets.mapAttrsToList (n: v: v) cfg.kubelet.taints
|
||||
);
|
||||
|
||||
generateKubeletBootstrapKubeconfig = lib.meta.getExe (
|
||||
pkgs.writeShellApplication {
|
||||
name = "kubelet-bootstrap-kubeconfig";
|
||||
runtimeInputs = with pkgs; [ coreutils ];
|
||||
text = ''
|
||||
mkdir -p /etc/kubernetes
|
||||
cat > /etc/kubernetes/bootstrap-kubeconfig <<EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: ${cfg.cas.kubernetes.crt}
|
||||
server: ${cfg.apiserver._address}
|
||||
name: local
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kubelet-bootstrap
|
||||
name: bootstrap
|
||||
current-context: bootstrap
|
||||
preferences: {}
|
||||
users:
|
||||
- name: kubelet-bootstrap
|
||||
user:
|
||||
token: $(<${cfg.kubelet.bootstrapToken})
|
||||
EOF
|
||||
'';
|
||||
}
|
||||
);
|
||||
|
||||
seedContainerImages = lib.meta.getExe (
|
||||
pkgs.writeShellApplication {
|
||||
name = "seed-container-images";
|
||||
runtimeInputs = with pkgs; [
|
||||
gzip
|
||||
containerd
|
||||
coreutils
|
||||
];
|
||||
text = ''
|
||||
${lib.strings.concatMapStrings (img: ''
|
||||
echo "Seeding container image: ${img}"
|
||||
${
|
||||
if (lib.hasSuffix "gz" img) then
|
||||
''zcat "${img}" | ctr -n k8s.io image import -''
|
||||
else
|
||||
''cat "${img}" | ctr -n k8s.io image import -''
|
||||
}
|
||||
'') cfg.kubelet.seedImages}
|
||||
'';
|
||||
}
|
||||
);
|
||||
in
|
||||
{
|
||||
description = "Kubernetes Kubelet";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [
|
||||
"network.target"
|
||||
"containerd.service"
|
||||
"kube-apisever.service"
|
||||
];
|
||||
path = with pkgs; [
|
||||
kubernetes
|
||||
coreutils
|
||||
util-linux
|
||||
git
|
||||
openssh
|
||||
iproute2
|
||||
ethtool
|
||||
iptables
|
||||
socat
|
||||
thin-provisioning-tools
|
||||
];
|
||||
preStart = ''
|
||||
${generateKubeletBootstrapKubeconfig}
|
||||
${seedContainerImages}
|
||||
'';
|
||||
script = lib.strings.concatStringsSep " " (
|
||||
[
|
||||
"kubelet"
|
||||
"--config=${kubeletConfig}"
|
||||
"--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubeconfig"
|
||||
"--kubeconfig=/var/lib/kubelet/kubeconfig"
|
||||
"--cert-dir=/var/lib/kubelet/pki"
|
||||
"--hostname-override=${lib.strings.toLower config.networking.fqdnOrHostName}"
|
||||
"--kubeconfig=/etc/kubernetes/bootstrap-kubeconfig"
|
||||
"--pod-infra-container-image=pause"
|
||||
"--root-dir=/var/lib/kubelet"
|
||||
]
|
||||
++ lib.lists.optional (taints != "") [
|
||||
"--register-with-taints=${taints}"
|
||||
]
|
||||
);
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
CPUAccounting = true;
|
||||
MemoryAccounting = true;
|
||||
Restart = "on-failure";
|
||||
RestartSec = "1000ms";
|
||||
WorkingDirectory = "/var/lib/kubelet";
|
||||
};
|
||||
unitConfig.StartLimitIntervalSec = 0;
|
||||
};
|
||||
|
||||
kube-proxy = {
|
||||
description = "Kubernetes Proxy";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
path = with pkgs; [
|
||||
kubernetes
|
||||
iptables
|
||||
conntrack-tools
|
||||
];
|
||||
script = lib.strings.concatStringsSep " " [
|
||||
"kube-proxy"
|
||||
"--bind-address=${cfg.proxy.address}"
|
||||
"--cluster-cidr=${cfg.kubelet.cidr}"
|
||||
"--hostname-override=${lib.strings.toLower config.networking.fqdnOrHostName}"
|
||||
"--kubeconfig=${
|
||||
cfg.lib.mkKubeConfig "kube-proxy" cfg.cas.kubernetes.crt cfg.certs.accounts.proxy.crt
|
||||
cfg.certs.accounts.proxy.key
|
||||
}"
|
||||
];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
WorkingDirectory = "/var/lib/kubernetes";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 5;
|
||||
};
|
||||
unitConfig.StartLimitIntervalSec = 0;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall.enable = false;
|
||||
}
|
||||
|
||||
# only master
|
||||
(lib.mkIf (lib.all (m: m == "master") cfg.roles) {
|
||||
services.kubernetes.kubelet.taints = {
|
||||
unschedulable = {
|
||||
value = "true";
|
||||
effect = "NoSchedule";
|
||||
};
|
||||
"node-role.kubernetes.io/master" = {
|
||||
value = "true";
|
||||
effect = "NoSchedule";
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
# master
|
||||
(lib.mkIf (lib.elem "master" cfg.roles) {
|
||||
services = {
|
||||
etcd = {
|
||||
enable = true;
|
||||
name = cfg.address;
|
||||
keyFile = cfg.certs.etcd.server.key;
|
||||
certFile = cfg.certs.etcd.server.crt;
|
||||
trustedCaFile = cfg.cas.etcd.crt;
|
||||
peerKeyFile = cfg.certs.etcd.peer.key;
|
||||
peerCertFile = cfg.certs.etcd.peer.crt;
|
||||
peerTrustedCaFile = cfg.cas.etcd.crt;
|
||||
clientCertAuth = true;
|
||||
peerClientCertAuth = true;
|
||||
listenClientUrls = [ "https://0.0.0.0:2379" ];
|
||||
listenPeerUrls = [ "https://0.0.0.0:2380" ];
|
||||
advertiseClientUrls = [ "https://${cfg.address}:2379" ];
|
||||
initialCluster = [ "${cfg.address}=https://${cfg.address}:2380" ];
|
||||
initialAdvertisePeerUrls = [ "https://${cfg.address}:2380" ];
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services = {
|
||||
kube-apiserver = {
|
||||
description = "Kubernetes API Server";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "network.target" ];
|
||||
path = with pkgs; [ kubernetes ];
|
||||
script = lib.strings.concatStringsSep " " [
|
||||
"kube-apiserver"
|
||||
"--allow-privileged=true"
|
||||
"--authorization-mode=RBAC,Node"
|
||||
"--bind-address=${cfg.apiserver.address}"
|
||||
"--secure-port=${toString cfg.apiserver.port}"
|
||||
"--client-ca-file=${cfg.cas.kubernetes.crt}"
|
||||
"--tls-cert-file=${cfg.certs.apiserver.server.crt}"
|
||||
"--tls-private-key-file=${cfg.certs.apiserver.server.key}"
|
||||
"--enable-admission-plugins=${
|
||||
lib.strings.concatStringsSep "," [
|
||||
"NamespaceLifecycle"
|
||||
"LimitRanger"
|
||||
"ServiceAccount"
|
||||
"ResourceQuota"
|
||||
"DefaultStorageClass"
|
||||
"DefaultTolerationSeconds"
|
||||
"NodeRestriction"
|
||||
]
|
||||
}"
|
||||
"--etcd-servers=${
|
||||
lib.strings.concatStringsSep "," [
|
||||
"https://${cfg.address}:2379"
|
||||
"https://127.0.0.1:2379"
|
||||
]
|
||||
}"
|
||||
"--etcd-cafile=${cfg.cas.etcd.crt}"
|
||||
"--etcd-certfile=${cfg.certs.apiserver.etcdClient.crt}"
|
||||
"--etcd-keyfile=${cfg.certs.apiserver.etcdClient.key}"
|
||||
"--kubelet-certificate-authority=${cfg.cas.kubernetes.crt}"
|
||||
"--kubelet-client-certificate=${cfg.certs.apiserver.kubeletClient.crt}"
|
||||
"--kubelet-client-key=${cfg.certs.apiserver.kubeletClient.key}"
|
||||
"--proxy-client-cert-file=${cfg.certs.frontProxy.crt}"
|
||||
"--proxy-client-key-file=${cfg.certs.frontProxy.key}"
|
||||
"--runtime-config=authentication.k8s.io/v1beta1=true"
|
||||
"--api-audiences=api,https://kubernetes.default.svc"
|
||||
"--service-account-issuer=https://kubernetes.default.svc"
|
||||
"--service-account-signing-key-file=${cfg.certs.serviceAccount.private}"
|
||||
"--service-account-key-file=${cfg.certs.serviceAccount.public}"
|
||||
"--service-cluster-ip-range=${cfg.cidr}"
|
||||
"--storage-backend=etcd3"
|
||||
"--enable-bootstrap-token-auth=true"
|
||||
"--token-auth-file=${cfg.apiserver.bootstrapTokenFile}"
|
||||
"--requestheader-client-ca-file=${cfg.cas.frontProxy.crt}"
|
||||
"--requestheader-allowed-names=front-proxy-client"
|
||||
"--requestheader-extra-headers-prefix=X-Remote-Extra-"
|
||||
"--requestheader-group-headers=X-Remote-Group"
|
||||
"--requestheader-username-headers=X-Remote-User"
|
||||
];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
WorkingDirectory = "/var/lib/kubernetes";
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
AmbientCapabilities = "cap_net_bind_service";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 5;
|
||||
};
|
||||
|
||||
unitConfig.StartLimitIntervalSec = 0;
|
||||
};
|
||||
|
||||
kube-scheduler = {
|
||||
description = "Kubernetes Scheduler";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
path = with pkgs; [ kubernetes ];
|
||||
script = lib.strings.concatStringsSep " " [
|
||||
"kube-scheduler"
|
||||
"--bind-address=${cfg.scheduler.address}"
|
||||
"--secure-port=${toString cfg.scheduler.port}"
|
||||
"--leader-elect=true"
|
||||
"--kubeconfig=${
|
||||
cfg.lib.mkKubeConfig "kube-scheduler" cfg.cas.kubernetes.crt cfg.certs.accounts.scheduler.crt
|
||||
cfg.certs.accounts.scheduler.key
|
||||
}"
|
||||
];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
WorkingDirectory = "/var/lib/kubernetes";
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 5;
|
||||
};
|
||||
unitConfig.StartLimitIntervalSec = 0;
|
||||
};
|
||||
|
||||
kube-controller-manager = {
|
||||
description = "Kubernetes Controller Manager";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
path = with pkgs; [ kubernetes ];
|
||||
script = lib.strings.concatStringsSep " " [
|
||||
"kube-controller-manager"
|
||||
"--allocate-node-cidrs=true"
|
||||
"--bind-address=${cfg.controllerManager.address}"
|
||||
"--secure-port=${toString cfg.controllerManager.port}"
|
||||
"--cluster-cidr=${cfg.kubelet.cidr}"
|
||||
"--kubeconfig=${
|
||||
cfg.lib.mkKubeConfig "kube-controller-manager" cfg.cas.kubernetes.crt
|
||||
cfg.certs.accounts.controllerManager.crt
|
||||
cfg.certs.accounts.controllerManager.key
|
||||
}"
|
||||
"--leader-elect=true"
|
||||
"--root-ca-file=${cfg.cas.kubernetes.crt}"
|
||||
"--service-account-private-key-file=${cfg.certs.serviceAccount.private}"
|
||||
"--use-service-account-credentials"
|
||||
"--client-ca-file=${cfg.cas.kubernetes.crt}"
|
||||
"--cluster-signing-cert-file=${cfg.cas.kubernetes.crt}"
|
||||
"--cluster-signing-key-file=${cfg.cas.kubernetes.key}"
|
||||
"--requestheader-client-ca-file=${cfg.cas.frontProxy.crt}"
|
||||
];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 30;
|
||||
WorkingDirectory = "/var/lib/kubernetes";
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
};
|
||||
unitConfig.StartLimitIntervalSec = 0;
|
||||
};
|
||||
|
||||
kube-addon-manager =
|
||||
let
|
||||
mkAddons =
|
||||
addons:
|
||||
lib.attrsets.mapAttrsToList (
|
||||
name: addon:
|
||||
(pkgs.formats.json { }).generate "${name}.json" {
|
||||
apiVersion = "v1";
|
||||
kind = "List";
|
||||
items = addon;
|
||||
}
|
||||
) addons;
|
||||
in
|
||||
{
|
||||
description = "Kubernetes Addon Manager";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
environment = {
|
||||
ADDON_PATH = pkgs.runCommand "kube-addons" { } ''
|
||||
mkdir -p $out
|
||||
${lib.strings.concatMapStringsSep "\n" (a: "ln -s ${a} $out/${baseNameOf a}") (
|
||||
mkAddons cfg.addonManager.addons
|
||||
)}
|
||||
'';
|
||||
KUBECONFIG =
|
||||
cfg.lib.mkKubeConfig "addon-manager" cfg.cas.kubernetes.crt cfg.certs.accounts.addonManager.crt
|
||||
cfg.certs.accounts.addonManager.key;
|
||||
};
|
||||
path = with pkgs; [
|
||||
kubernetes
|
||||
gawk
|
||||
];
|
||||
preStart = ''
|
||||
export KUBECONFIG=${cfg.kubeconfigs.admin}
|
||||
kubectl apply -f ${lib.strings.concatStringsSep " \\\n -f " (mkAddons cfg.addonManager.bootstrapAddons)}
|
||||
'';
|
||||
script = "kube-addons";
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
PermissionsStartOnly = true;
|
||||
WorkingDirectory = "/var/lib/kubernetes";
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 10;
|
||||
};
|
||||
unitConfig.StartLimitIntervalSec = 0;
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
# node
|
||||
(lib.mkIf (lib.elem "node" cfg.roles) {
|
||||
virtualisation.containerd = {
|
||||
enable = true;
|
||||
settings = {
|
||||
version = 2;
|
||||
root = "/var/lib/containerd";
|
||||
state = "/run/containerd";
|
||||
oom_score = 0;
|
||||
grpc.address = "/run/containerd/containerd.sock";
|
||||
plugins."io.containerd.grpc.v1.cri" = {
|
||||
containerd.runtimes.runc = {
|
||||
runtime_type = "io.containerd.runc.v2";
|
||||
options.SystemdCgroup = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
]
|
||||
);
|
||||
}
|
@@ -1,293 +0,0 @@
|
||||
{ config, ... }:
|
||||
{
|
||||
sops.secrets = {
|
||||
"kubernetes/ca/kubernetes/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "users";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/ca/kubernetes/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "users";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/ca/front-proxy/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/ca/front-proxy/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/ca/etcd/crt" = {
|
||||
owner = "etcd";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/ca/etcd/key" = {
|
||||
owner = "etcd";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/apiserver/server/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/apiserver/server/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/apiserver/etcd-client/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/apiserver/etcd-client/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/apiserver/kubelet-client/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/apiserver/kubelet-client/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/front-proxy/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/front-proxy/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/etcd/server/crt" = {
|
||||
owner = "etcd";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/etcd/server/key" = {
|
||||
owner = "etcd";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/etcd/peer/crt" = {
|
||||
owner = "etcd";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/etcd/peer/key" = {
|
||||
owner = "etcd";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/sa/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/sa/pub" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/scheduler/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/scheduler/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/controller-manager/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/controller-manager/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/addon-manager/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/addon-manager/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/proxy/crt" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/proxy/key" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/admin/crt" = {
|
||||
group = "kubernetes";
|
||||
};
|
||||
|
||||
"kubernetes/cert/accounts/admin/key" = {
|
||||
group = "kubernetes";
|
||||
};
|
||||
|
||||
"kubernetes/token/kubelet-bootstrap/token" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
|
||||
"kubernetes/token/kubelet-bootstrap/csv" = {
|
||||
owner = "kubernetes";
|
||||
group = "kubernetes";
|
||||
mode = "0440";
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes = {
|
||||
cas = {
|
||||
kubernetes = {
|
||||
key = config.sops.secrets."kubernetes/ca/kubernetes/key".path;
|
||||
crt = config.sops.secrets."kubernetes/ca/kubernetes/crt".path;
|
||||
};
|
||||
|
||||
frontProxy = {
|
||||
key = config.sops.secrets."kubernetes/ca/front-proxy/key".path;
|
||||
crt = config.sops.secrets."kubernetes/ca/front-proxy/crt".path;
|
||||
};
|
||||
|
||||
etcd = {
|
||||
key = config.sops.secrets."kubernetes/ca/etcd/key".path;
|
||||
crt = config.sops.secrets."kubernetes/ca/etcd/crt".path;
|
||||
};
|
||||
};
|
||||
|
||||
certs = {
|
||||
apiserver = {
|
||||
server = {
|
||||
key = config.sops.secrets."kubernetes/cert/apiserver/server/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/apiserver/server/crt".path;
|
||||
};
|
||||
|
||||
etcdClient = {
|
||||
key = config.sops.secrets."kubernetes/cert/apiserver/etcd-client/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/apiserver/etcd-client/crt".path;
|
||||
};
|
||||
|
||||
kubeletClient = {
|
||||
key = config.sops.secrets."kubernetes/cert/apiserver/kubelet-client/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/apiserver/kubelet-client/crt".path;
|
||||
};
|
||||
};
|
||||
|
||||
etcd = {
|
||||
server = {
|
||||
key = config.sops.secrets."kubernetes/cert/etcd/server/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/etcd/server/crt".path;
|
||||
};
|
||||
|
||||
peer = {
|
||||
key = config.sops.secrets."kubernetes/cert/etcd/peer/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/etcd/peer/crt".path;
|
||||
};
|
||||
};
|
||||
|
||||
frontProxy = {
|
||||
key = config.sops.secrets."kubernetes/cert/front-proxy/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/front-proxy/crt".path;
|
||||
};
|
||||
|
||||
serviceAccount = {
|
||||
private = config.sops.secrets."kubernetes/cert/sa/key".path;
|
||||
public = config.sops.secrets."kubernetes/cert/sa/pub".path;
|
||||
};
|
||||
|
||||
accounts = {
|
||||
scheduler = {
|
||||
key = config.sops.secrets."kubernetes/cert/accounts/scheduler/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/accounts/scheduler/crt".path;
|
||||
};
|
||||
|
||||
controllerManager = {
|
||||
key = config.sops.secrets."kubernetes/cert/accounts/controller-manager/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/accounts/controller-manager/crt".path;
|
||||
};
|
||||
|
||||
addonManager = {
|
||||
key = config.sops.secrets."kubernetes/cert/accounts/addon-manager/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/accounts/addon-manager/crt".path;
|
||||
};
|
||||
|
||||
proxy = {
|
||||
key = config.sops.secrets."kubernetes/cert/accounts/proxy/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/accounts/proxy/crt".path;
|
||||
};
|
||||
|
||||
admin = {
|
||||
key = config.sops.secrets."kubernetes/cert/accounts/admin/key".path;
|
||||
crt = config.sops.secrets."kubernetes/cert/accounts/admin/crt".path;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
kubelet.bootstrapToken = config.sops.secrets."kubernetes/token/kubelet-bootstrap/token".path;
|
||||
|
||||
apiserver.bootstrapTokenFile = config.sops.secrets."kubernetes/token/kubelet-bootstrap/csv".path;
|
||||
};
|
||||
|
||||
systemd.services = {
|
||||
kubelet.after = [ "sops-nix.service" ];
|
||||
kube-apiserver.after = [ "sops-nix.service" ];
|
||||
kube-controller-manager.after = [ "sops-nix.service" ];
|
||||
kube-scheduler.after = [ "sops-nix.service" ];
|
||||
kube-proxy.after = [ "sops-nix.service" ];
|
||||
kube-addon-manager.after = [ "sops-nix.service" ];
|
||||
etcd.after = [ "sops-nix.service" ];
|
||||
};
|
||||
}
|
@@ -1,207 +0,0 @@
|
||||
#!/usr/bin/env -S nix shell nixpkgs#openssl nixpkgs#yq-go nixpkgs#sops -c bash
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
generate_ca() {
|
||||
local target_dir=$1
|
||||
local ca_name=$2
|
||||
local ca_days=$3
|
||||
local cn=$4
|
||||
|
||||
mkdir -p "${target_dir}"
|
||||
local ca_key=${target_dir}/${ca_name}.key
|
||||
local ca_cert=${target_dir}/${ca_name}.crt
|
||||
|
||||
openssl genrsa -out "${ca_key}" 2048
|
||||
openssl req -x509 -new -nodes -key "${ca_key}" -days "${ca_days}" -out "${ca_cert}" -subj "/CN=${cn}"
|
||||
}
|
||||
|
||||
generate_alt_names() {
|
||||
local hosts=("$@")
|
||||
local dns=0
|
||||
local ip=0
|
||||
local alt_names=""
|
||||
|
||||
for host in "${hosts[@]}"; do
|
||||
if [[ ${host} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
alt_names="${alt_names}IP.${ip} = ${host}\n"
|
||||
((ip++))
|
||||
else
|
||||
alt_names="${alt_names}DNS.${dns} = ${host}\n"
|
||||
((dns++))
|
||||
fi
|
||||
done
|
||||
|
||||
echo -e "${alt_names}"
|
||||
}
|
||||
|
||||
generate_cnf() {
|
||||
local target_dir=$1
|
||||
local cnf_name=$2
|
||||
local cn=$3
|
||||
local hosts=("${@:4}")
|
||||
|
||||
mkdir -p "${target_dir}"
|
||||
local cnf_file=${target_dir}/${cnf_name}.cnf
|
||||
|
||||
cat <<EOF > "${cnf_file}"
|
||||
[req]
|
||||
prompt = no
|
||||
|
||||
[ req_ext ]
|
||||
subjectAltName = @alt_names
|
||||
|
||||
[ alt_names ]
|
||||
$(generate_alt_names "${hosts[@]}")
|
||||
|
||||
[ v3_ext ]
|
||||
authorityKeyIdentifier=keyid,issuer:always
|
||||
basicConstraints=CA:FALSE
|
||||
keyUsage=keyEncipherment,dataEncipherment,digitalSignature
|
||||
extendedKeyUsage=serverAuth,clientAuth
|
||||
subjectAltName=@alt_names
|
||||
EOF
|
||||
}
|
||||
|
||||
generate_crt() {
|
||||
local target_dir=$1
|
||||
local cert_name=$2
|
||||
local cert_days=$3
|
||||
local cn=$4
|
||||
local o=$5
|
||||
local ca_key=$6
|
||||
local ca_cert=$7
|
||||
local hosts=("${@:8}")
|
||||
|
||||
mkdir -p "${target_dir}"
|
||||
local cert_key=${target_dir}/${cert_name}.key
|
||||
local cert_csr=${target_dir}/${cert_name}.csr
|
||||
local cert_cert=${target_dir}/${cert_name}.crt
|
||||
|
||||
openssl genrsa -out "${cert_key}" 2048
|
||||
|
||||
local subject="/CN=${cn}"
|
||||
if [ -n "${o}" ]; then
|
||||
subject="${subject}/O=${o}"
|
||||
fi
|
||||
|
||||
if [ -n "${hosts}" ]; then
|
||||
generate_cnf "${target_dir}" "${cert_name}" "${cn}" "${hosts[@]}"
|
||||
openssl req -new -key "${cert_key}" -out "${cert_csr}" -subj "${subject}" -config "${target_dir}"/"${cert_name}".cnf
|
||||
openssl x509 -req -in "${cert_csr}" -CA "${ca_cert}" -CAkey "${ca_key}" -CAcreateserial -out "${cert_cert}" -days "${cert_days}" -extfile "${target_dir}"/"${cert_name}".cnf -extensions v3_ext
|
||||
else
|
||||
openssl req -new -key "${cert_key}" -out "${cert_csr}" -subj "${subject}"
|
||||
openssl x509 -req -in "${cert_csr}" -CA "${ca_cert}" -CAkey "${ca_key}" -CAcreateserial -out "${cert_cert}" -days "${cert_days}"
|
||||
fi
|
||||
}
|
||||
|
||||
generate_key_pair() {
|
||||
local target_dir=$1
|
||||
local key_name=$2
|
||||
|
||||
mkdir -p "${target_dir}"
|
||||
local private_key=${target_dir}/${key_name}.key
|
||||
local public_key=${target_dir}/${key_name}.pub
|
||||
|
||||
openssl genrsa -out "${private_key}" 2048
|
||||
openssl rsa -in "${private_key}" -pubout -out "${public_key}"
|
||||
}
|
||||
|
||||
generate_auth_token() {
|
||||
local target_dir=$1
|
||||
local token_name=$2
|
||||
local user=$3
|
||||
local id=$4
|
||||
local groups=$5
|
||||
|
||||
mkdir -p "${target_dir}"
|
||||
local token_file="${target_dir}/${token_name}.token"
|
||||
local token_auth_file="${target_dir}/${token_name}.csv"
|
||||
|
||||
token="$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')"
|
||||
echo "${token}" > "${token_file}"
|
||||
echo "${token},${user},${id},\"${groups}\"" > "${token_auth_file}"
|
||||
}
|
||||
|
||||
DEFAULT_CA_DAYS=3650
|
||||
|
||||
if [[ -z "$SOPS_AGE_KEY_FILE" ]]; then
|
||||
echo "Please set the SOPS_AGE_KEY_FILE environment variable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
hostname=${1:-$(hostname)}
|
||||
|
||||
if [ -z "${hostname}" ]; then
|
||||
echo "Usage: $0 [hostname]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
generate_ca out/ca kubernetes ${DEFAULT_CA_DAYS} kubernetes-ca ""
|
||||
generate_ca out/ca front-proxy ${DEFAULT_CA_DAYS} kubernetes-front-proxy-ca ""
|
||||
generate_ca out/ca etcd ${DEFAULT_CA_DAYS} etcd-ca ""
|
||||
|
||||
generate_crt out/cert/apiserver server ${DEFAULT_CA_DAYS} kube-apiserver "" out/ca/kubernetes.key out/ca/kubernetes.crt "kubernetes" "kubernetes.default" "kubernetes.default.svc" "kubernetes.default.svc.cluster" "kubernetes.default.svc.cluster.local" "localhost" "10.0.0.1" "127.0.0.1"
|
||||
generate_crt out/cert/apiserver etcd-client ${DEFAULT_CA_DAYS} kube-apiserver-etcd-client "" out/ca/etcd.key out/ca/etcd.crt ""
|
||||
generate_crt out/cert/apiserver kubelet-client ${DEFAULT_CA_DAYS} kube-apiserver-kubelet-client "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
|
||||
generate_crt out/cert/etcd server ${DEFAULT_CA_DAYS} kube-etcd "" out/ca/etcd.key out/ca/etcd.crt "etcd.local" "etcd.cluster.local" "localhost" "127.0.0.1"
|
||||
generate_crt out/cert/etcd peer ${DEFAULT_CA_DAYS} kube-etcd-peer "" out/ca/etcd.key out/ca/etcd.crt "etcd.local" "etcd.cluster.local" "localhost" "127.0.0.1"
|
||||
generate_crt out/cert front-proxy ${DEFAULT_CA_DAYS} front-proxy-client "" out/ca/front-proxy.key out/ca/front-proxy.crt ""
|
||||
|
||||
generate_key_pair out/cert sa
|
||||
|
||||
generate_crt out/cert/accounts scheduler ${DEFAULT_CA_DAYS} system:kube-scheduler "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
|
||||
generate_crt out/cert/accounts controller-manager ${DEFAULT_CA_DAYS} system:kube-controller-manager "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
|
||||
generate_crt out/cert/accounts addon-manager ${DEFAULT_CA_DAYS} system:kube-addon-manager "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
|
||||
generate_crt out/cert/accounts proxy ${DEFAULT_CA_DAYS} system:kube-proxy "" out/ca/kubernetes.key out/ca/kubernetes.crt ""
|
||||
generate_crt out/cert/accounts admin ${DEFAULT_CA_DAYS} kubernetes-admin system:masters out/ca/kubernetes.key out/ca/kubernetes.crt ""
|
||||
generate_crt out/cert/accounts users ${DEFAULT_CA_DAYS} kubernetes-users system:masters out/ca/kubernetes.key out/ca/kubernetes.crt ""
|
||||
|
||||
generate_auth_token out/token kubelet-bootstrap "kubelet-bootstrap" 10001 "system:bootstrappers"
|
||||
|
||||
sops_config="../../../../../$(hostname)/secrets/sops.yaml"
|
||||
secrets_file="../../../../../$(hostname)/secrets/secrets.yaml"
|
||||
decrypted_secrets_file="../../../../../$(hostname)/secrets/.decrypted~secrets.yaml"
|
||||
sops -d "${secrets_file}" > "${decrypted_secrets_file}"
|
||||
|
||||
yq -i '
|
||||
del(.kubernetes) |
|
||||
.kubernetes.ca.kubernetes.crt = load_str("out/ca/kubernetes.crt") |
|
||||
.kubernetes.ca.kubernetes.key = load_str("out/ca/kubernetes.key") |
|
||||
.kubernetes.ca.front-proxy.crt = load_str("out/ca/front-proxy.crt") |
|
||||
.kubernetes.ca.front-proxy.key = load_str("out/ca/front-proxy.key") |
|
||||
.kubernetes.ca.etcd.crt = load_str("out/ca/etcd.crt") |
|
||||
.kubernetes.ca.etcd.key = load_str("out/ca/etcd.key") |
|
||||
.kubernetes.cert.apiserver.server.crt = load_str("out/cert/apiserver/server.crt") |
|
||||
.kubernetes.cert.apiserver.server.key = load_str("out/cert/apiserver/server.key") |
|
||||
.kubernetes.cert.apiserver.etcd-client.crt = load_str("out/cert/apiserver/etcd-client.crt") |
|
||||
.kubernetes.cert.apiserver.etcd-client.key = load_str("out/cert/apiserver/etcd-client.key") |
|
||||
.kubernetes.cert.apiserver.kubelet-client.crt = load_str("out/cert/apiserver/kubelet-client.crt") |
|
||||
.kubernetes.cert.apiserver.kubelet-client.key = load_str("out/cert/apiserver/kubelet-client.key") |
|
||||
.kubernetes.cert.etcd.server.crt = load_str("out/cert/etcd/server.crt") |
|
||||
.kubernetes.cert.etcd.server.key = load_str("out/cert/etcd/server.key") |
|
||||
.kubernetes.cert.etcd.peer.crt = load_str("out/cert/etcd/peer.crt") |
|
||||
.kubernetes.cert.etcd.peer.key = load_str("out/cert/etcd/peer.key") |
|
||||
.kubernetes.cert.front-proxy.crt = load_str("out/cert/front-proxy.crt") |
|
||||
.kubernetes.cert.front-proxy.key = load_str("out/cert/front-proxy.key") |
|
||||
.kubernetes.cert.sa.key = load_str("out/cert/sa.key") |
|
||||
.kubernetes.cert.sa.pub = load_str("out/cert/sa.pub") |
|
||||
.kubernetes.cert.accounts.scheduler.crt = load_str("out/cert/accounts/scheduler.crt") |
|
||||
.kubernetes.cert.accounts.scheduler.key = load_str("out/cert/accounts/scheduler.key") |
|
||||
.kubernetes.cert.accounts.controller-manager.crt = load_str("out/cert/accounts/controller-manager.crt") |
|
||||
.kubernetes.cert.accounts.controller-manager.key = load_str("out/cert/accounts/controller-manager.key") |
|
||||
.kubernetes.cert.accounts.addon-manager.crt = load_str("out/cert/accounts/addon-manager.crt") |
|
||||
.kubernetes.cert.accounts.addon-manager.key = load_str("out/cert/accounts/addon-manager.key") |
|
||||
.kubernetes.cert.accounts.proxy.crt = load_str("out/cert/accounts/proxy.crt") |
|
||||
.kubernetes.cert.accounts.proxy.key = load_str("out/cert/accounts/proxy.key") |
|
||||
.kubernetes.cert.accounts.admin.crt = load_str("out/cert/accounts/admin.crt") |
|
||||
.kubernetes.cert.accounts.admin.key = load_str("out/cert/accounts/admin.key") |
|
||||
.kubernetes.cert.accounts.users.crt = load_str("out/cert/accounts/users.crt") |
|
||||
.kubernetes.cert.accounts.users.key = load_str("out/cert/accounts/users.key") |
|
||||
.kubernetes.token.kubelet-bootstrap.token = load_str("out/token/kubelet-bootstrap.token") |
|
||||
.kubernetes.token.kubelet-bootstrap.csv = load_str("out/token/kubelet-bootstrap.csv")
|
||||
' "${decrypted_secrets_file}"
|
||||
|
||||
sops --config "${sops_config}" -e "${decrypted_secrets_file}" > "${secrets_file}"
|
||||
rm -rf ${decrypted_secrets_file} out
|
@@ -22,38 +22,14 @@
       "/cache"."${home}/.kube/cache" = { };
     };
 
-    users.users.${user}.extraGroups = [ "kubernetes" ];
-
-    sops.secrets = {
-      "kubernetes/cert/accounts/${user}/crt" = {
-        key = "kubernetes/cert/accounts/users/crt";
-        group = "users";
-        mode = "0440";
-      };
-
-      "kubernetes/cert/accounts/${user}/key" = {
-        key = "kubernetes/cert/accounts/users/key";
-        group = "users";
-        mode = "0440";
-      };
-    };
-
-    services.kubernetes.kubeconfigs.${user} =
-      config.services.kubernetes.lib.mkKubeConfig user config.sops.secrets."kubernetes/ca/kubernetes/crt".path
-        config.sops.secrets."kubernetes/cert/accounts/${user}/crt".path
-        config.sops.secrets."kubernetes/cert/accounts/${user}/key".path;
-
     home-manager.users.${user} = {
-      home = {
-        packages = with pkgs; [
-          kubectl
-          kustomize
-          kubernetes-helm
-          kompose
-        ];
-
-        file.".kube/local".source = config.services.kubernetes.kubeconfigs.${user};
-      };
+      home.packages = with pkgs; [
+        kubectl
+        kustomize
+        kubernetes-helm
+        kompose
+        kind
+      ];
 
       programs = {
         k9s = {
@@ -1,6 +0,0 @@
{ lib, ... }:
{
  services.kubernetes.addonManager.bootstrapAddons = lib.mkMerge [
    (import ./namespace { })
  ];
}
@@ -1,10 +0,0 @@
{ ... }:
{
  viya-ns = {
    apiVersion = "v1";
    kind = "Namespace";
    metadata = {
      name = "viya";
    };
  };
}
@@ -1,4 +0,0 @@
{ ... }:
{
  imports = [ ./addons ];
}
@@ -24,7 +24,6 @@
     ../common/configs/system/git
     ../common/configs/system/gpg-agent
     ../common/configs/system/impermanence
-    ../common/configs/system/kubernetes
     ../common/configs/system/neovim
     ../common/configs/system/networking
     ../common/configs/system/nix
@@ -50,7 +49,6 @@
 
     ./configs/git
     ./configs/globalprotect-remote-connect
-    ./configs/viya
 
     ./users/nikara
   ];
@@ -13,7 +13,7 @@ in
     "viya/orders-cli/secret".sopsFile = ../../../../../../../secrets/sas/secrets.yaml;
   };
 
-  home.packages = [ (pkgs.callPackage ./orders-cli/package.nix { }) ];
+  home.packages = [ (pkgs.callPackage ./package.nix { }) ];
 
   xdg.configFile."viya4-orders-cli/config.yaml".source =
     (pkgs.formats.yaml { }).generate "config.yaml"
@@ -81,7 +81,7 @@ in
     (import ./configs/gui/vscode { inherit user home; })
 
     # Private Imports
-    (import ./configs/console/viya { inherit user home; })
+    (import ./configs/console/viya-orders-cli { inherit user home; })
   ];
 
   # echo "password" | mkpasswd -s
Submodule submodules/nixpkgs updated: e8e18ef630...43ed29dceb