Replace telegraf with node exporter
Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
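Scope of the change: the host-level Telegraf agents (system-wide and per-user) and their polkit, DBus and fail2ban-umask glue are removed. Host and container metrics are instead collected by dedicated Prometheus exporters (node, podman, fail2ban, smartctl) built as OCI images under packages/docker/ and run as Podman quadlet containers, with Prometheus scraping them and provisioning a Grafana datasource. For orientation only, a rough sketch of the simplest non-containerised alternative, which this commit deliberately does not take, assuming the stock services.prometheus.exporters.node options:

{ ... }:
{
  # Hypothetical host-level variant, shown for comparison; the commit
  # instead packages node_exporter into an image and runs it via quadlet.
  services.prometheus.exporters.node = {
    enable = true;
    port = 9100;
    enabledCollectors = [
      "systemd"
      "processes"
    ];
  };

  # Mirrors the firewall rule added below: expose the exporter only on
  # the WireGuard interface.
  networking.firewall.interfaces.wg0.allowedTCPPorts = [ 9100 ];
}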
@@ -13,6 +13,5 @@
|
||||
|
||||
initrd.systemd.enable = true;
|
||||
kernelPackages = pkgs.linuxPackages_latest;
|
||||
supportedFilesystems = [ "btrfs" ];
|
||||
};
|
||||
}
|
||||
|
@@ -1,5 +1,10 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
boot = {
|
||||
initrd.supportedFilesystems = [ "btrfs" ];
|
||||
supportedFilesystems = [ "btrfs" ];
|
||||
};
|
||||
|
||||
services.btrfs.autoScrub = {
|
||||
enable = true;
|
||||
interval = "weekly";
|
||||
|
@@ -1,9 +1,4 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
imports = [ ./options.nix ];
|
||||
|
||||
@@ -11,15 +6,15 @@
|
||||
# https://github.com/NixOS/nixpkgs/blob/master/nixos/doc/manual/administration/systemd-state.section.md
|
||||
# https://github.com/NixOS/nixpkgs/pull/286140/files
|
||||
# https://git.eisfunke.com/config/nixos/-/blob/e65e1dc21d06d07b454005762b177ef151f8bfb6/nixos/machine-id.nix
|
||||
sops.secrets."machineId".mode = "0444";
|
||||
sops.secrets.machineId.mode = "0444";
|
||||
|
||||
fileSystems."/persist".neededForBoot = true;
|
||||
|
||||
environment = {
|
||||
impermanence.enable = true;
|
||||
|
||||
etc."machine-id".source = pkgs.runCommandLocal "machine-id-link" { } ''
|
||||
ln -s ${config.sops.secrets."machineId".path} $out
|
||||
etc.machine-id.source = pkgs.runCommandLocal "machine-id-link" { } ''
|
||||
ln -s ${config.sops.secrets.machineId.path} $out
|
||||
'';
|
||||
|
||||
persistence = {
|
||||
|
@@ -8,7 +8,7 @@
|
||||
../../../../../secrets/personal/secrets.yaml;
|
||||
};
|
||||
|
||||
templates."nix-access-tokens" = {
|
||||
templates.nix-access-tokens = {
|
||||
content = ''
|
||||
access-tokens = github.com=${config.sops.placeholder."git/credentials/github.com/public/password"}
|
||||
'';
|
||||
@@ -33,7 +33,7 @@
|
||||
registry.self.flake = inputs.self;
|
||||
|
||||
extraOptions = ''
|
||||
!include ${config.sops.templates."nix-access-tokens".path}
|
||||
!include ${config.sops.templates.nix-access-tokens.path}
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
@@ -1,13 +1,5 @@
|
||||
{ ... }:
|
||||
{
|
||||
nixpkgs.overlays = [
|
||||
(final: prev: {
|
||||
fail2ban = prev.fail2ban.overrideAttrs (oldAttrs: {
|
||||
patches = oldAttrs.patches or [ ] ++ [ ./remove-umask.patch ];
|
||||
});
|
||||
})
|
||||
];
|
||||
|
||||
environment = {
|
||||
enableAllTerminfo = true;
|
||||
persistence."/persist/state"."/var/lib/fail2ban" = { };
|
||||
@@ -32,12 +24,4 @@
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.fail2ban.serviceConfig = {
|
||||
User = "root";
|
||||
Group = "fail2ban";
|
||||
UMask = "0117";
|
||||
};
|
||||
|
||||
users.groups.fail2ban = { };
|
||||
}
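Note on the removals above: the umask patch and the dedicated fail2ban group existed only so the unprivileged Telegraf service could talk to the fail2ban control socket. The replacement exporter added later in this diff runs as a rootful quadlet container and simply bind-mounts the socket read-only, roughly as follows (sketch; selfPkgs is bound as in the jupiter-vps podman config further down):

{
  virtualisation.quadlet.containers.prometheus-fail2ban-exporter.containerConfig = {
    image = "docker-archive:${selfPkgs.docker-prometheus-fail2ban-exporter}";
    publishPorts = [ "9191:9191" ];
    volumes = [ "/run/fail2ban/fail2ban.sock:/var/run/fail2ban/fail2ban.sock:ro" ];
  };
}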
|
||||
|
@@ -1,15 +0,0 @@
|
||||
diff --git a/fail2ban/server/server.py b/fail2ban/server/server.py
|
||||
index e438c4ca..aeee4075 100644
|
||||
--- a/fail2ban/server/server.py
|
||||
+++ b/fail2ban/server/server.py
|
||||
@@ -108,9 +108,7 @@ class Server:
|
||||
signal.signal(s, new)
|
||||
|
||||
def start(self, sock, pidfile, force=False, observer=True, conf={}):
|
||||
- # First set the mask to only allow access to owner
|
||||
- os.umask(0o077)
|
||||
- # Second daemonize before logging etc, because it will close all handles:
|
||||
+ # Daemonize before logging etc, because it will close all handles:
|
||||
if self.__daemon: # pragma: no cover
|
||||
logSys.info("Starting in daemon mode")
|
||||
ret = self.__createDaemon()
|
@@ -1,117 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
security.polkit.extraConfig = ''
|
||||
polkit.addRule(function(action, subject) {
|
||||
if (
|
||||
subject.user == "telegraf"
|
||||
&& action.id.indexOf("org.freedesktop.systemd1.") == 0
|
||||
)
|
||||
{ return polkit.Result.YES; }
|
||||
});
|
||||
'';
|
||||
|
||||
services.telegraf = {
|
||||
enable = true;
|
||||
|
||||
extraConfig = {
|
||||
agent.quiet = true;
|
||||
|
||||
outputs.prometheus_client = [ { listen = ":9273"; } ];
|
||||
|
||||
inputs =
|
||||
{
|
||||
cpu = [ { report_active = true; } ];
|
||||
|
||||
disk = [
|
||||
{
|
||||
mount_points = lib.attrsets.mapAttrsToList (_: fs: fs.mountPoint) config.fileSystems;
|
||||
}
|
||||
];
|
||||
|
||||
diskio = [ { skip_serial_number = false; } ];
|
||||
|
||||
kernel = [ { } ];
|
||||
|
||||
mem = [ { } ];
|
||||
|
||||
processes = [ { } ];
|
||||
|
||||
swap = [ { } ];
|
||||
|
||||
system = [ { } ];
|
||||
|
||||
internal = [ { } ];
|
||||
|
||||
# TODO: Enable
|
||||
# linux_cpu = [ { } ];
|
||||
|
||||
net = [ { ignore_protocol_stats = true; } ];
|
||||
|
||||
# TODO: Enable
|
||||
# sensors = [ { remove_numbers = false; } ];
|
||||
|
||||
smart = [ { } ];
|
||||
|
||||
# TODO: Enable
|
||||
# amd_rocm_smi = [ { } ];
|
||||
|
||||
systemd_units = [ { } ];
|
||||
}
|
||||
// lib.attrsets.optionalAttrs config.virtualisation.podman.enable {
|
||||
docker = [
|
||||
{
|
||||
endpoint = "unix:///var/run/podman/podman.sock";
|
||||
perdevice = false;
|
||||
perdevice_include = [
|
||||
"cpu"
|
||||
"blkio"
|
||||
"network"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
// lib.attrsets.optionalAttrs config.services.fail2ban.enable {
|
||||
fail2ban = [ { } ];
|
||||
}
|
||||
// lib.attrsets.optionalAttrs (config.networking.wireguard.interfaces != { }) {
|
||||
wireguard = [ { } ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.telegraf = {
|
||||
path =
|
||||
with pkgs;
|
||||
[
|
||||
dbus
|
||||
smartmontools
|
||||
# TODO: Enable
|
||||
# lm_sensors
|
||||
# rocmPackages.rocm-smi
|
||||
]
|
||||
++ lib.lists.optional config.services.fail2ban.enable fail2ban;
|
||||
|
||||
environment = {
|
||||
DBUS_SYSTEM_BUS_ADDRESS = "unix:path=/var/run/dbus/system_bus_socket";
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
AmbientCapabilities = [
|
||||
"CAP_NET_RAW"
|
||||
"CAP_SYS_RAWIO"
|
||||
] ++ lib.lists.optional (config.networking.wireguard.interfaces != { }) "CAP_NET_ADMIN";
|
||||
|
||||
SupplementaryGroups =
|
||||
[
|
||||
"disk"
|
||||
]
|
||||
++ lib.lists.optional config.virtualisation.podman.enable "podman"
|
||||
++ lib.lists.optional config.services.fail2ban.enable "fail2ban";
|
||||
};
|
||||
};
|
||||
}
|
@@ -1,58 +0,0 @@
|
||||
{
|
||||
user ? throw "user argument is required",
|
||||
home ? throw "home argument is required",
|
||||
}:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
port = 9273 + config.users.users.${user}.uid;
|
||||
hmConfig = config.home-manager.users.${user};
|
||||
in
|
||||
{
|
||||
home-manager.users.${user}.systemd.user.services.telegraf =
|
||||
let
|
||||
telegrafConfig = (pkgs.formats.toml { }).generate "config.toml" {
|
||||
agent.quiet = true;
|
||||
|
||||
outputs.prometheus_client = [ { listen = ":${builtins.toString port}"; } ];
|
||||
|
||||
inputs =
|
||||
{
|
||||
systemd_units = [
|
||||
{ scope = "user"; }
|
||||
];
|
||||
}
|
||||
// lib.attrsets.optionalAttrs hmConfig.services.podman.enable {
|
||||
docker = [
|
||||
{
|
||||
endpoint =
|
||||
let
|
||||
uid = builtins.toString config.users.users.${user}.uid;
|
||||
in
|
||||
"unix:///var/run/user/${uid}/podman/podman.sock";
|
||||
perdevice = false;
|
||||
perdevice_include = [
|
||||
"cpu"
|
||||
"blkio"
|
||||
"network"
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
Unit.Description = "Telegraf Agent";
|
||||
|
||||
Install.WantedBy = [ "default.target" ];
|
||||
|
||||
Service = {
|
||||
ExecStart = "${config.services.telegraf.package}/bin/telegraf -config ${telegrafConfig}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
};
|
||||
}
|
@@ -38,14 +38,14 @@ in
|
||||
EmailTracking = true;
|
||||
};
|
||||
FirefoxHome = {
|
||||
"Locked" = true;
|
||||
"Search" = true;
|
||||
"TopSites" = true;
|
||||
"SponsoredTopSites" = false;
|
||||
"Highlights" = false;
|
||||
"Pocket" = false;
|
||||
"SponsoredPocket" = false;
|
||||
"Snippets" = false;
|
||||
Locked = true;
|
||||
Search = true;
|
||||
TopSites = true;
|
||||
SponsoredTopSites = false;
|
||||
Highlights = false;
|
||||
Pocket = false;
|
||||
SponsoredPocket = false;
|
||||
Snippets = false;
|
||||
};
|
||||
NoDefaultBookmarks = true;
|
||||
OfferToSaveLogins = false;
|
||||
@@ -94,10 +94,10 @@ in
|
||||
];
|
||||
force = true;
|
||||
engines = {
|
||||
"google".metaData.alias = "@g";
|
||||
"ddg".metaData.alias = "@d";
|
||||
"wikipedia".metaData.alias = "@w";
|
||||
"nix" = {
|
||||
google.metaData.alias = "@g";
|
||||
ddg.metaData.alias = "@d";
|
||||
wikipedia.metaData.alias = "@w";
|
||||
nix = {
|
||||
urls = [
|
||||
{
|
||||
template = "https://mynixos.com/search";
|
||||
|
@@ -308,85 +308,85 @@ in
|
||||
settings = {
|
||||
"markerIconRules" = [
|
||||
{
|
||||
"ruleName" = "default";
|
||||
"preset" = true;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-circle";
|
||||
"markerColor" = "blue";
|
||||
ruleName = "default";
|
||||
preset = true;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-circle";
|
||||
markerColor = "blue";
|
||||
};
|
||||
}
|
||||
{
|
||||
"ruleName" = "#restaurant";
|
||||
"preset" = false;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-utensils";
|
||||
"markerColor" = "red";
|
||||
ruleName = "#restaurant";
|
||||
preset = false;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-utensils";
|
||||
markerColor = "red";
|
||||
};
|
||||
}
|
||||
{
|
||||
"ruleName" = "#bar";
|
||||
"preset" = false;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-martini-glass";
|
||||
"markerColor" = "purple";
|
||||
ruleName = "#bar";
|
||||
preset = false;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-martini-glass";
|
||||
markerColor = "purple";
|
||||
};
|
||||
}
|
||||
{
|
||||
"ruleName" = "#coffee";
|
||||
"preset" = false;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-mug-hot";
|
||||
"markerColor" = "purple";
|
||||
ruleName = "#coffee";
|
||||
preset = false;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-mug-hot";
|
||||
markerColor = "purple";
|
||||
};
|
||||
}
|
||||
{
|
||||
"ruleName" = "#culture";
|
||||
"preset" = false;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-building-columns";
|
||||
"markerColor" = "black";
|
||||
ruleName = "#culture";
|
||||
preset = false;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-building-columns";
|
||||
markerColor = "black";
|
||||
};
|
||||
}
|
||||
{
|
||||
"ruleName" = "#shopping";
|
||||
"preset" = false;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-shopping-bag";
|
||||
"markerColor" = "yellow";
|
||||
ruleName = "#shopping";
|
||||
preset = false;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-shopping-bag";
|
||||
markerColor = "yellow";
|
||||
};
|
||||
}
|
||||
{
|
||||
"ruleName" = "#entertainment";
|
||||
"preset" = false;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-microphone";
|
||||
"markerColor" = "pink";
|
||||
ruleName = "#entertainment";
|
||||
preset = false;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-microphone";
|
||||
markerColor = "pink";
|
||||
};
|
||||
}
|
||||
{
|
||||
"ruleName" = "#nature";
|
||||
"preset" = false;
|
||||
"iconDetails" = {
|
||||
"prefix" = "fas";
|
||||
"icon" = "fa-tree";
|
||||
"markerColor" = "green";
|
||||
ruleName = "#nature";
|
||||
preset = false;
|
||||
iconDetails = {
|
||||
prefix = "fas";
|
||||
icon = "fa-tree";
|
||||
markerColor = "green";
|
||||
};
|
||||
}
|
||||
];
|
||||
"searchProvider" = "google";
|
||||
"geocodingApiMethod" = "path";
|
||||
"geocodingApiPath" = hmConfig.sops.secrets."google/geocoding".path;
|
||||
"useGooglePlaces" = true;
|
||||
"letZoomBeyondMax" = true;
|
||||
"showGeolinkPreview" = true;
|
||||
"newNotePath" = "Inbox";
|
||||
searchProvider = "google";
|
||||
geocodingApiMethod = "path";
|
||||
geocodingApiPath = hmConfig.sops.secrets."google/geocoding".path;
|
||||
useGooglePlaces = true;
|
||||
letZoomBeyondMax = true;
|
||||
showGeolinkPreview = true;
|
||||
newNotePath = "Inbox";
|
||||
};
|
||||
}
|
||||
{
|
||||
|
@@ -64,8 +64,8 @@ in
|
||||
(lib.mkIf cfg.copilot.enable {
|
||||
"github.copilot.enable" = {
|
||||
"*" = true;
|
||||
"plaintext" = true;
|
||||
"markdown" = true;
|
||||
plaintext = true;
|
||||
markdown = true;
|
||||
};
|
||||
"chat.editing.alwaysSaveWithGeneratedChanges" = true;
|
||||
})
|
||||
|
hosts/jupiter-vps/configs/podman/default.nix (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
{ ... }:
|
||||
{
|
||||
imports = [ ./prometheus ];
|
||||
}
|
hosts/jupiter-vps/configs/podman/prometheus/default.nix (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
{ inputs, system, ... }:
|
||||
let
|
||||
selfPkgs = inputs.self.packages.${system};
|
||||
in
|
||||
{
|
||||
boot.kernelParams = [ "psi=1" ];
|
||||
|
||||
networking.firewall.interfaces.wg0.allowedTCPPorts = [
|
||||
9100
|
||||
9882
|
||||
9191
|
||||
];
|
||||
|
||||
virtualisation.quadlet.containers = {
|
||||
prometheus-node-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-node-exporter}";
|
||||
# Allow collecting host metrics, port :9100 by default
|
||||
networks = [ "host" ];
|
||||
podmanArgs = [
|
||||
"--pid"
|
||||
"host"
|
||||
];
|
||||
volumes = [
|
||||
"/:/host:ro,rslave"
|
||||
"/run/udev:/run/udev:ro"
|
||||
"/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:ro"
|
||||
"/etc/static/os-release:/host/etc/os-release:ro"
|
||||
];
|
||||
exec = [
|
||||
"--log.level=warn"
|
||||
"--path.rootfs=/host"
|
||||
"--no-collector.arp"
|
||||
"--no-collector.bonding"
|
||||
"--no-collector.edac"
|
||||
"--no-collector.fibrechannel"
|
||||
"--no-collector.infiniband"
|
||||
"--no-collector.ipvs"
|
||||
"--no-collector.mdadm"
|
||||
"--no-collector.nfs"
|
||||
"--no-collector.nfsd"
|
||||
"--no-collector.selinux"
|
||||
"--no-collector.xfs"
|
||||
"--no-collector.zfs"
|
||||
"--collector.cpu_vulnerabilities"
|
||||
"--collector.drm"
|
||||
"--collector.ethtool"
|
||||
"--collector.processes"
|
||||
"--collector.systemd"
|
||||
];
|
||||
};
|
||||
|
||||
prometheus-podman-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-podman-exporter}";
|
||||
publishPorts = [ "9882:9882" ];
|
||||
volumes = [ "/run/podman/podman.sock:/run/podman/podman.sock:ro" ];
|
||||
exec = [ "--collector.enable-all" ];
|
||||
};
|
||||
|
||||
prometheus-fail2ban-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-fail2ban-exporter}";
|
||||
publishPorts = [ "9191:9191" ];
|
||||
volumes = [ "/run/fail2ban/fail2ban.sock:/var/run/fail2ban/fail2ban.sock:ro" ];
|
||||
};
|
||||
};
|
||||
}
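A note on the image = "docker-archive:${selfPkgs.docker-*}" references used above and throughout this commit: each selfPkgs.docker-* attribute is a pkgs.dockerTools.buildImage derivation (see the new files under packages/docker/ below), i.e. an image tarball in the Nix store, and the docker-archive: transport tells Podman to load the image from that tarball instead of pulling it from a registry. A minimal, self-contained sketch of the same pattern (the hello image is hypothetical):

{ pkgs, ... }:
let
  # Build an image tarball into the Nix store...
  helloImage = pkgs.dockerTools.buildImage {
    name = "hello";
    copyToRoot = pkgs.buildEnv {
      name = "root";
      paths = [ pkgs.hello ];
      pathsToLink = [ "/bin" ];
    };
    config.Entrypoint = [ "/bin/hello" ];
  };
in
{
  # ...and reference it by store path, so no registry pull is required.
  virtualisation.quadlet.containers.hello.containerConfig.image =
    "docker-archive:${helloImage}";
}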
|
@@ -10,7 +10,7 @@ in
|
||||
"net.ipv4.conf.all.proxy_arp" = 1;
|
||||
};
|
||||
|
||||
sops.secrets."wireguard" = { };
|
||||
sops.secrets.wireguard = { };
|
||||
|
||||
networking = {
|
||||
firewall = {
|
||||
@@ -21,7 +21,7 @@ in
|
||||
wireguard.interfaces.wg0 = {
|
||||
ips = [ "10.0.0.1/24" ];
|
||||
listenPort = wireguardPort;
|
||||
privateKeyFile = config.sops.secrets."wireguard".path;
|
||||
privateKeyFile = config.sops.secrets.wireguard.path;
|
||||
|
||||
peers = [
|
||||
{
|
||||
|
@@ -6,10 +6,12 @@
|
||||
|
||||
./hardware
|
||||
|
||||
../common/configs/system/btrfs
|
||||
../common/configs/system/impermanence
|
||||
../common/configs/system/neovim
|
||||
../common/configs/system/nix
|
||||
../common/configs/system/nixpkgs
|
||||
../common/configs/system/podman
|
||||
../common/configs/system/sops
|
||||
../common/configs/system/sshd
|
||||
../common/configs/system/system
|
||||
@@ -17,6 +19,7 @@
|
||||
../common/configs/system/zsh
|
||||
|
||||
./configs/boot
|
||||
./configs/podman
|
||||
./configs/wireguard
|
||||
];
|
||||
|
||||
|
@@ -26,10 +26,22 @@
|
||||
name = "root";
|
||||
size = "100%";
|
||||
content = {
|
||||
type = "filesystem";
|
||||
format = "ext4";
|
||||
mountpoint = "/";
|
||||
mountOptions = [ "defaults" ];
|
||||
type = "btrfs";
|
||||
extraArgs = [ "-f" ];
|
||||
subvolumes =
|
||||
let
|
||||
mountOptions = [
|
||||
"compress=zstd:1"
|
||||
"noatime"
|
||||
"user_subvol_rm_allowed"
|
||||
];
|
||||
in
|
||||
{
|
||||
"@" = {
|
||||
mountpoint = "/";
|
||||
inherit mountOptions;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@@ -11,7 +11,7 @@ let
|
||||
jupiterPublicIPv4 = "51.89.210.124";
|
||||
in
|
||||
{
|
||||
sops.secrets."wireguard" = { };
|
||||
sops.secrets.wireguard = { };
|
||||
|
||||
networking = {
|
||||
firewall.allowedUDPPorts = [ wireguardPort ];
|
||||
@@ -33,7 +33,7 @@ in
|
||||
"${jupiterPublicIPv4}/32"
|
||||
];
|
||||
listenPort = wireguardPort;
|
||||
privateKeyFile = config.sops.secrets."wireguard".path;
|
||||
privateKeyFile = config.sops.secrets.wireguard.path;
|
||||
table = "wireguard";
|
||||
|
||||
postSetup = [ "${ip} rule add from ${jupiterPublicIPv4} table ${table}" ];
|
||||
|
@@ -23,7 +23,6 @@
|
||||
../common/configs/system/sshd
|
||||
../common/configs/system/sudo
|
||||
../common/configs/system/system
|
||||
../common/configs/system/telegraf
|
||||
../common/configs/system/users
|
||||
../common/configs/system/zsh
|
||||
|
||||
|
@@ -74,7 +74,7 @@ in
|
||||
jwks = [ { key = hmConfig.sops.placeholder."authelia/oidcKey"; } ];
|
||||
|
||||
authorization_policies = {
|
||||
admin = {
|
||||
admin_two_factor = {
|
||||
default_policy = "deny";
|
||||
rules = [
|
||||
{
|
||||
@@ -83,6 +83,16 @@ in
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
admin_one_factor = {
|
||||
default_policy = "deny";
|
||||
rules = [
|
||||
{
|
||||
policy = "one_factor";
|
||||
subject = [ "group:admins" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -105,6 +115,8 @@ in
|
||||
};
|
||||
|
||||
theme = "auto";
|
||||
|
||||
telemetry.metrics.enabled = true;
|
||||
}
|
||||
);
|
||||
|
||||
@@ -125,13 +137,13 @@ in
|
||||
networks.authelia.networkConfig.internal = true;
|
||||
|
||||
volumes = {
|
||||
"authelia-redis" = { };
|
||||
"authelia-postgresql" = { };
|
||||
authelia-redis = { };
|
||||
authelia-postgresql = { };
|
||||
authelia = { };
|
||||
};
|
||||
|
||||
containers = {
|
||||
"authelia-init" = {
|
||||
authelia-init = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-yq}";
|
||||
volumes = [
|
||||
@@ -140,7 +152,7 @@ in
|
||||
];
|
||||
exec = [
|
||||
"eval-all"
|
||||
". as $item ireduce ({}; . * $item)"
|
||||
". as $item ireduce ({}; . *+ $item)"
|
||||
"/etc/authelia/users.yaml"
|
||||
"/etc/authelia/users.yaml.default"
|
||||
"-i"
|
||||
@@ -167,6 +179,7 @@ in
|
||||
networks = [
|
||||
networks.authelia.ref
|
||||
networks.traefik.ref
|
||||
networks.prometheus.ref
|
||||
];
|
||||
exec = [
|
||||
"--config"
|
||||
@@ -183,18 +196,18 @@ in
|
||||
};
|
||||
|
||||
unitConfig.After = [
|
||||
"${containers."authelia-init"._serviceName}.service"
|
||||
"${containers."authelia-postgresql"._serviceName}.service"
|
||||
"${containers."authelia-redis"._serviceName}.service"
|
||||
"${containers.authelia-init._serviceName}.service"
|
||||
"${containers.authelia-postgresql._serviceName}.service"
|
||||
"${containers.authelia-redis._serviceName}.service"
|
||||
"sops-nix.service"
|
||||
];
|
||||
};
|
||||
|
||||
"authelia-postgresql" = {
|
||||
authelia-postgresql = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-postgresql}";
|
||||
networks = [ networks.authelia.ref ];
|
||||
volumes = [ "${volumes."authelia-postgresql".ref}:/var/lib/postgresql/data" ];
|
||||
volumes = [ "${volumes.authelia-postgresql.ref}:/var/lib/postgresql/data" ];
|
||||
environments = {
|
||||
POSTGRES_DB = "authelia";
|
||||
POSTGRES_USER = "authelia";
|
||||
@@ -205,12 +218,37 @@ in
|
||||
unitConfig.After = [ "sops-nix.service" ];
|
||||
};
|
||||
|
||||
"authelia-redis".containerConfig = {
|
||||
authelia-redis.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-redis}";
|
||||
networks = [ networks.authelia.ref ];
|
||||
volumes = [ "${volumes."authelia-redis".ref}:/var/lib/redis" ];
|
||||
volumes = [ "${volumes.authelia-redis.ref}:/var/lib/redis" ];
|
||||
exec = [ "--save 60 1" ];
|
||||
};
|
||||
|
||||
prometheus-init.containerConfig.volumes =
|
||||
let
|
||||
autheliaConfig = (pkgs.formats.yaml { }).generate "authelia.yml" {
|
||||
scrape_configs =
|
||||
let
|
||||
hostname = config.networking.hostName;
|
||||
in
|
||||
[
|
||||
{
|
||||
job_name = "${hostname}-authelia";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "authelia:9959" ];
|
||||
labels = {
|
||||
app = "authelia";
|
||||
inherit user hostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
[ "${autheliaConfig}:/etc/prometheus/conf.d/authelia.yml" ];
|
||||
};
|
||||
};
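One subtle change in the *-init containers: the yq reduce expression moves from ". as $item ireduce ({}; . * $item)" to ". as $item ireduce ({}; . *+ $item)". In mikefarah yq, * deep-merges maps but replaces arrays, while *+ appends arrays, so every fragment under /etc/prometheus/conf.d/ can contribute its own scrape_configs entries instead of the last file overwriting the list. A rough Nix illustration of the intended merge semantics (mergeAppend is a hypothetical helper, for intuition only):

let
  inherit (builtins) isAttrs isList length elemAt zipAttrsWith;

  # Recursive merge that concatenates lists instead of replacing them,
  # mirroring yq's *+ operator.
  mergeAppend =
    a: b:
    zipAttrsWith (
      _: values:
      let
        x = elemAt values 0;
        y = elemAt values 1;
      in
      if length values == 1 then
        x
      else if isAttrs x && isAttrs y then
        mergeAppend x y
      else if isList x && isList y then
        x ++ y
      else
        y
    ) [ a b ];
in
mergeAppend { scrape_configs = [ { job_name = "a"; } ]; } {
  scrape_configs = [ { job_name = "b"; } ];
}
# => { scrape_configs = [ { job_name = "a"; } { job_name = "b"; } ]; }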
|
||||
|
||||
|
@@ -11,6 +11,7 @@ in
|
||||
(import ./authelia { inherit user home; })
|
||||
(import ./grafana { inherit user home; })
|
||||
(import ./ntfy { inherit user home; })
|
||||
(import ./prometheus { inherit user home; })
|
||||
(import ./traefik { inherit user home; })
|
||||
(import ./whoami { inherit user home; })
|
||||
];
|
||||
|
(File diff suppressed because it is too large)
@@ -13,7 +13,7 @@
|
||||
let
|
||||
selfPkgs = inputs.self.packages.${system};
|
||||
hmConfig = config.home-manager.users.${user};
|
||||
inherit (hmConfig.virtualisation.quadlet) volumes containers networks;
|
||||
inherit (hmConfig.virtualisation.quadlet) networks;
|
||||
autheliaClientId = "4R5ofTZgOjO5Nrbcm9f6KqBLZXy8LwPS5s3E3BUfPS2mRy0wSV41XZGLrLgiR4Z0MblyGzW211AHL7GCCaJu5KonLUKyRjoyuiAr";
|
||||
in
|
||||
{
|
||||
@@ -34,7 +34,7 @@ in
|
||||
client_name = "Grafana";
|
||||
client_secret = hmConfig.sops.placeholder."grafana/authelia/digest";
|
||||
redirect_uris = [ "https://stats.karaolidis.com/login/generic_oauth" ];
|
||||
authorization_policy = "admin";
|
||||
authorization_policy = "admin_one_factor";
|
||||
require_pkce = true;
|
||||
pkce_challenge_method = "S256";
|
||||
}
|
||||
@@ -122,87 +122,9 @@ in
|
||||
};
|
||||
|
||||
virtualisation.quadlet = {
|
||||
networks = {
|
||||
grafana.networkConfig.internal = true;
|
||||
# Allow access to host telegraf via non-internal network
|
||||
grafana-prometheus = { };
|
||||
};
|
||||
|
||||
volumes = {
|
||||
"grafana-prometheus-data" = { };
|
||||
"grafana-prometheus-config" = { };
|
||||
};
|
||||
networks.grafana.networkConfig.internal = true;
|
||||
|
||||
containers = {
|
||||
"grafana-prometheus-init" =
|
||||
let
|
||||
prometheusConfig = (pkgs.formats.yaml { }).generate "prometheus.yml" {
|
||||
global = {
|
||||
scrape_interval = "10s";
|
||||
evaluation_interval = "10s";
|
||||
};
|
||||
|
||||
scrape_configs = [
|
||||
{
|
||||
job_name = "telegraf";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "host.containers.internal:9273" ];
|
||||
labels.app = "telegraf";
|
||||
}
|
||||
{
|
||||
targets = [
|
||||
"host.containers.internal:${builtins.toString (9273 + config.users.users.${user}.uid)}"
|
||||
];
|
||||
labels.app = "telegraf-storm";
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-yq}";
|
||||
volumes = [
|
||||
"${volumes."grafana-prometheus-config".ref}:/etc/prometheus"
|
||||
"${prometheusConfig}:/etc/prometheus/conf.d/prometheus.yml"
|
||||
];
|
||||
entrypoint = "/bin/bash";
|
||||
exec = [
|
||||
"-c"
|
||||
"yq eval-all '. as $item ireduce ({}; . * $item)' /etc/prometheus/conf.d/*.yml > /etc/prometheus/prometheus.yml"
|
||||
];
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
Restart = "on-failure";
|
||||
};
|
||||
};
|
||||
|
||||
"grafana-prometheus" = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus}";
|
||||
volumes = [
|
||||
"${volumes."grafana-prometheus-config".ref}:/etc/prometheus"
|
||||
"${volumes."grafana-prometheus-data".ref}:/var/lib/prometheus"
|
||||
];
|
||||
networks = [
|
||||
networks.grafana.ref
|
||||
networks.grafana-prometheus.ref
|
||||
];
|
||||
exec = [
|
||||
"--config.file=/etc/prometheus/prometheus.yml"
|
||||
"--storage.tsdb.path=/var/lib/prometheus"
|
||||
"--storage.tsdb.retention.time=1y"
|
||||
"--log.level=warn"
|
||||
];
|
||||
};
|
||||
|
||||
unitConfig.After = [ "${containers."grafana-prometheus-init"._serviceName}.service" ];
|
||||
};
|
||||
|
||||
grafana = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-grafana}";
|
||||
@@ -217,17 +139,12 @@ in
|
||||
];
|
||||
};
|
||||
|
||||
unitConfig.After = [
|
||||
"${containers."grafana-prometheus"._serviceName}.service"
|
||||
"${containers."grafana-image-renderer"._serviceName}.service"
|
||||
];
|
||||
unitConfig.After = [ "sops-nix.service" ];
|
||||
};
|
||||
|
||||
"grafana-image-renderer" = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-grafana-image-renderer}";
|
||||
networks = [ networks.grafana.ref ];
|
||||
};
|
||||
grafana-image-renderer.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-grafana-image-renderer}";
|
||||
networks = [ networks.grafana.ref ];
|
||||
};
|
||||
|
||||
authelia.containerConfig.volumes = [
|
||||
|
@@ -71,6 +71,8 @@ in
|
||||
enable-signup = false;
|
||||
enable-login = true;
|
||||
enable-reservations = false;
|
||||
|
||||
metrics-listen-http = ":9090";
|
||||
}
|
||||
);
|
||||
|
||||
@@ -111,26 +113,54 @@ in
|
||||
|
||||
volumes.ntfy = { };
|
||||
|
||||
containers.ntfy = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-ntfy}";
|
||||
networks = [
|
||||
networks.ntfy.ref
|
||||
networks.traefik.ref
|
||||
];
|
||||
volumes = [
|
||||
"${volumes.ntfy.ref}:/var/lib/ntfy"
|
||||
"${hmConfig.sops.templates."ntfy-server.yml".path}:/etc/ntfy/server.yml:ro"
|
||||
"${hmConfig.sops.templates."ntfy-init.sh".path}:/entrypoint.sh:ro"
|
||||
];
|
||||
entrypoint = "/entrypoint.sh";
|
||||
labels = [
|
||||
"traefik.enable=true"
|
||||
"traefik.http.routers.ntfy.rule=Host(`ntfy.karaolidis.com`)"
|
||||
];
|
||||
containers = {
|
||||
ntfy = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-ntfy}";
|
||||
networks = [
|
||||
networks.ntfy.ref
|
||||
networks.traefik.ref
|
||||
networks.prometheus.ref
|
||||
];
|
||||
volumes = [
|
||||
"${volumes.ntfy.ref}:/var/lib/ntfy"
|
||||
"${hmConfig.sops.templates."ntfy-server.yml".path}:/etc/ntfy/server.yml:ro"
|
||||
"${hmConfig.sops.templates."ntfy-init.sh".path}:/entrypoint.sh:ro"
|
||||
];
|
||||
entrypoint = "/entrypoint.sh";
|
||||
labels = [
|
||||
"traefik.enable=true"
|
||||
"traefik.http.routers.ntfy.rule=Host(`ntfy.karaolidis.com`)"
|
||||
];
|
||||
};
|
||||
|
||||
unitConfig.After = [ "sops-nix.service" ];
|
||||
};
|
||||
|
||||
unitConfig.After = [ "sops-nix.service" ];
|
||||
prometheus-init.containerConfig.volumes =
|
||||
let
|
||||
ntfyConfig = (pkgs.formats.yaml { }).generate "ntfy.yml" {
|
||||
scrape_configs =
|
||||
let
|
||||
hostname = config.networking.hostName;
|
||||
in
|
||||
[
|
||||
{
|
||||
job_name = "${hostname}-ntfy";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "ntfy:9090" ];
|
||||
labels = {
|
||||
app = "ntfy.sh";
|
||||
inherit user hostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
[ "${ntfyConfig}:/etc/prometheus/conf.d/ntfy.yml" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@@ -0,0 +1,310 @@
|
||||
{
|
||||
user ? throw "user argument is required",
|
||||
home ? throw "home argument is required",
|
||||
}:
|
||||
{
|
||||
config,
|
||||
inputs,
|
||||
pkgs,
|
||||
system,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
selfPkgs = inputs.self.packages.${system};
|
||||
hmConfig = config.home-manager.users.${user};
|
||||
jupiterVpsConfig = inputs.self.nixosConfigurations.jupiter-vps.config;
|
||||
inherit (hmConfig.virtualisation.quadlet) volumes containers networks;
|
||||
in
|
||||
{
|
||||
boot.kernelParams = [ "psi=1" ];
|
||||
|
||||
# The below containers all need to run as root to collect host metrics.
|
||||
virtualisation.quadlet.containers = {
|
||||
prometheus-node-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-node-exporter}";
|
||||
# Allow collecting host metrics, port :9100 by default
|
||||
networks = [ "host" ];
|
||||
podmanArgs = [
|
||||
"--pid"
|
||||
"host"
|
||||
];
|
||||
volumes = [
|
||||
"/:/host:ro,rslave"
|
||||
"/run/udev:/run/udev:ro"
|
||||
"/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:ro"
|
||||
"/etc/static/os-release:/host/etc/os-release:ro"
|
||||
];
|
||||
exec = [
|
||||
"--log.level=warn"
|
||||
"--path.rootfs=/host"
|
||||
"--no-collector.arp"
|
||||
"--no-collector.bonding"
|
||||
"--no-collector.edac"
|
||||
"--no-collector.fibrechannel"
|
||||
"--no-collector.infiniband"
|
||||
"--no-collector.ipvs"
|
||||
"--no-collector.mdadm"
|
||||
"--no-collector.nfs"
|
||||
"--no-collector.nfsd"
|
||||
"--no-collector.selinux"
|
||||
"--no-collector.xfs"
|
||||
"--no-collector.zfs"
|
||||
"--collector.cpu_vulnerabilities"
|
||||
"--collector.drm"
|
||||
"--collector.ethtool"
|
||||
"--collector.processes"
|
||||
"--collector.systemd"
|
||||
];
|
||||
};
|
||||
|
||||
prometheus-podman-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-podman-exporter}";
|
||||
publishPorts = [ "9882:9882" ];
|
||||
volumes = [ "/run/podman/podman.sock:/run/podman/podman.sock:ro" ];
|
||||
exec = [ "--collector.enable-all" ];
|
||||
};
|
||||
|
||||
prometheus-fail2ban-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-fail2ban-exporter}";
|
||||
publishPorts = [ "9191:9191" ];
|
||||
volumes = [ "/run/fail2ban/fail2ban.sock:/var/run/fail2ban/fail2ban.sock:ro" ];
|
||||
};
|
||||
|
||||
prometheus-smartctl-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-smartctl-exporter}";
|
||||
publishPorts = [ "9633:9633" ];
|
||||
podmanArgs = [ "--privileged" ];
|
||||
};
|
||||
};
|
||||
|
||||
home-manager.users.${user} = {
|
||||
virtualisation.quadlet = {
|
||||
networks = {
|
||||
prometheus.networkConfig.internal = true;
|
||||
prometheus-ext = { };
|
||||
};
|
||||
|
||||
volumes = {
|
||||
prometheus-data = { };
|
||||
prometheus-config = { };
|
||||
};
|
||||
|
||||
containers = {
|
||||
prometheus-node-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-node-exporter}";
|
||||
networks = [ networks.prometheus.ref ];
|
||||
volumes =
|
||||
let
|
||||
uid = builtins.toString config.users.users.${user}.uid;
|
||||
in
|
||||
[ "/run/user/${uid}/bus:/var/run/dbus/system_bus_socket:ro" ];
|
||||
exec = [
|
||||
"--log.level=warn"
|
||||
"--path.rootfs=/host"
|
||||
"--collector.disable-defaults"
|
||||
"--collector.systemd"
|
||||
];
|
||||
};
|
||||
|
||||
prometheus-podman-exporter.containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus-podman-exporter}";
|
||||
networks = [ networks.prometheus.ref ];
|
||||
volumes =
|
||||
let
|
||||
uid = builtins.toString config.users.users.${user}.uid;
|
||||
in
|
||||
[ "/run/user/${uid}/podman/podman.sock:/run/podman/podman.sock:ro" ];
|
||||
exec = [ "--collector.enable-all" ];
|
||||
};
|
||||
|
||||
prometheus-init =
|
||||
let
|
||||
prometheusConfig = (pkgs.formats.yaml { }).generate "prometheus.yml" {
|
||||
global.scrape_interval = "15s";
|
||||
|
||||
scrape_configs =
|
||||
let
|
||||
hostname = config.networking.hostName;
|
||||
jupiterVpsHostname = jupiterVpsConfig.networking.hostName;
|
||||
in
|
||||
[
|
||||
{
|
||||
job_name = "${hostname}-node-exporter";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "host.containers.internal:9100" ];
|
||||
labels = {
|
||||
app = "node-exporter";
|
||||
user = "root";
|
||||
inherit hostname;
|
||||
};
|
||||
}
|
||||
{
|
||||
targets = [ "prometheus-node-exporter:9100" ];
|
||||
labels = {
|
||||
app = "node-exporter";
|
||||
inherit user hostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "${hostname}-podman-exporter";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "host.containers.internal:9882" ];
|
||||
labels = {
|
||||
app = "podman-exporter";
|
||||
user = "root";
|
||||
inherit hostname;
|
||||
};
|
||||
}
|
||||
{
|
||||
targets = [ "prometheus-podman-exporter:9882" ];
|
||||
labels = {
|
||||
app = "podman-exporter";
|
||||
inherit user hostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "${hostname}-fail2ban-exporter";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "host.containers.internal:9191" ];
|
||||
labels = {
|
||||
app = "fail2ban-exporter";
|
||||
user = "root";
|
||||
inherit hostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "${hostname}-smartctl-exporter";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "host.containers.internal:9633" ];
|
||||
labels = {
|
||||
app = "smartctl-exporter";
|
||||
user = "root";
|
||||
inherit hostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "${jupiterVpsHostname}-node-exporter";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "10.0.0.1:9100" ];
|
||||
labels = {
|
||||
app = "node-exporter";
|
||||
user = "root";
|
||||
hostname = jupiterVpsHostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "${jupiterVpsHostname}-podman-exporter";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "10.0.0.1:9882" ];
|
||||
labels = {
|
||||
app = "podman-exporter";
|
||||
user = "root";
|
||||
hostname = jupiterVpsHostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
job_name = "${jupiterVpsHostname}-fail2ban-exporter";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "10.0.0.1:9191" ];
|
||||
labels = {
|
||||
app = "fail2ban-exporter";
|
||||
user = "root";
|
||||
hostname = jupiterVpsHostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-yq}";
|
||||
volumes = [
|
||||
"${volumes.prometheus-config.ref}:/etc/prometheus"
|
||||
"${prometheusConfig}:/etc/prometheus/conf.d/prometheus.yml"
|
||||
];
|
||||
entrypoint = "/bin/bash";
|
||||
exec = [
|
||||
"-c"
|
||||
"yq eval-all '. as $item ireduce ({}; . *+ $item)' /etc/prometheus/conf.d/*.yml > /etc/prometheus/prometheus.yml"
|
||||
];
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
Restart = "on-failure";
|
||||
};
|
||||
};
|
||||
|
||||
prometheus = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-prometheus}";
|
||||
volumes = [
|
||||
"${volumes.prometheus-config.ref}:/etc/prometheus"
|
||||
"${volumes.prometheus-data.ref}:/var/lib/prometheus"
|
||||
];
|
||||
networks = [
|
||||
networks.grafana.ref
|
||||
networks.prometheus.ref
|
||||
# Access to root exporters
|
||||
networks.prometheus-ext.ref
|
||||
];
|
||||
exec = [
|
||||
"--log.level=debug"
|
||||
"--config.file=/etc/prometheus/prometheus.yml"
|
||||
"--storage.tsdb.path=/var/lib/prometheus"
|
||||
"--storage.tsdb.retention.time=1y"
|
||||
];
|
||||
};
|
||||
|
||||
unitConfig.After = [ "${containers.prometheus-init._serviceName}.service" ];
|
||||
};
|
||||
|
||||
grafana.containerConfig.volumes =
|
||||
let
|
||||
datasource = (pkgs.formats.yaml { }).generate "prometheus.yaml" {
|
||||
apiVersion = 1;
|
||||
|
||||
datasources = [
|
||||
{
|
||||
name = "Prometheus";
|
||||
type = "prometheus";
|
||||
access = "proxy";
|
||||
url = "http://prometheus:9090";
|
||||
uid = "prometheus";
|
||||
jsonData = {
|
||||
httpMethod = "POST";
|
||||
manageAlerts = true;
|
||||
prometheusType = "Prometheus";
|
||||
prometheusVersion = lib.strings.getVersion pkgs.prometheus;
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
[ "${datasource}:/etc/grafana/conf/provisioning/datasources/prometheus.yaml" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
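The file above also defines the extension point used by the other service modules in this commit (authelia, ntfy, traefik): a container that wants to be scraped joins the prometheus network and mounts a one-job YAML fragment into prometheus-init's /etc/prometheus/conf.d/, which the yq init step then merges into the final prometheus.yml. Distilled into a minimal sketch (myapp is hypothetical; user, pkgs and networks are bound exactly as in the modules above):

home-manager.users.${user}.virtualisation.quadlet.containers = {
  # Hypothetical service that should be scraped.
  myapp.containerConfig.networks = [ networks.prometheus.ref ];

  # Contribute one more scrape job; the init container's yq merge appends
  # it to scrape_configs in the generated prometheus.yml.
  prometheus-init.containerConfig.volumes =
    let
      myappConfig = (pkgs.formats.yaml { }).generate "myapp.yml" {
        scrape_configs = [
          {
            job_name = "myapp";
            static_configs = [ { targets = [ "myapp:8080" ]; } ];
          }
        ];
      };
    in
    [ "${myappConfig}:/etc/prometheus/conf.d/myapp.yml" ];
};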
|
@@ -43,7 +43,10 @@ in
|
||||
traefik = {
|
||||
containerConfig = {
|
||||
image = "docker-archive:${selfPkgs.docker-traefik}";
|
||||
networks = [ networks.traefik.ref ];
|
||||
networks = [
|
||||
networks.traefik.ref
|
||||
networks.prometheus.ref
|
||||
];
|
||||
volumes =
|
||||
let
|
||||
uid = builtins.toString config.users.users.${user}.uid;
|
||||
@@ -84,6 +87,8 @@ in
|
||||
"--certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare"
|
||||
"--certificatesresolvers.letsencrypt.acme.email=nick@karaolidis.com"
|
||||
"--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
|
||||
|
||||
"--metrics.prometheus=true"
|
||||
];
|
||||
labels = [
|
||||
"traefik.enable=true"
|
||||
@@ -135,12 +140,37 @@ in
|
||||
};
|
||||
in
|
||||
[ "${config}:/etc/authelia/conf.d/traefik.yaml:ro" ];
|
||||
|
||||
prometheus-init.containerConfig.volumes =
|
||||
let
|
||||
traefikConfig = (pkgs.formats.yaml { }).generate "traefik.yml" {
|
||||
scrape_configs =
|
||||
let
|
||||
hostname = config.networking.hostName;
|
||||
in
|
||||
[
|
||||
{
|
||||
job_name = "${hostname}-traefik";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [ "traefik:8080" ];
|
||||
labels = {
|
||||
app = "traefik";
|
||||
inherit user hostname;
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
[ "${traefikConfig}:/etc/prometheus/conf.d/traefik.yml" ];
|
||||
};
|
||||
};
|
||||
|
||||
# https://github.com/eriksjolund/podman-traefik-socket-activation
|
||||
systemd.user.sockets = {
|
||||
"traefik-http" = {
|
||||
traefik-http = {
|
||||
Socket = {
|
||||
ListenStream = "0.0.0.0:80";
|
||||
FileDescriptorName = "http";
|
||||
@@ -152,7 +182,7 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
"traefik-https" = {
|
||||
traefik-https = {
|
||||
Socket = {
|
||||
ListenStream = "0.0.0.0:443";
|
||||
FileDescriptorName = "https";
|
||||
|
@@ -13,7 +13,6 @@ in
|
||||
(import ../../../common/configs/user/console/neovim { inherit user home; })
|
||||
(import ../../../common/configs/user/console/podman { inherit user home; })
|
||||
(import ../../../common/configs/user/console/sops { inherit user home; })
|
||||
(import ../../../common/configs/user/console/telegraf { inherit user home; })
|
||||
(import ../../../common/configs/user/console/tmux { inherit user home; })
|
||||
(import ../../../common/configs/user/console/zsh { inherit user home; })
|
||||
|
||||
|
@@ -16,6 +16,16 @@
|
||||
docker-ntfy = import ./docker/ntfy { inherit pkgs; };
|
||||
docker-postgresql = import ./docker/postgresql { inherit pkgs; };
|
||||
docker-prometheus = import ./docker/prometheus { inherit pkgs; };
|
||||
docker-prometheus-fail2ban-exporter = import ./docker/prometheus-fail2ban-exporter {
|
||||
inherit pkgs inputs system;
|
||||
};
|
||||
docker-prometheus-node-exporter = import ./docker/prometheus-node-exporter { inherit pkgs; };
|
||||
docker-prometheus-podman-exporter = import ./docker/prometheus-podman-exporter {
|
||||
inherit pkgs inputs system;
|
||||
};
|
||||
docker-prometheus-smartctl-exporter = import ./docker/prometheus-smartctl-exporter {
|
||||
inherit pkgs;
|
||||
};
|
||||
docker-redis = import ./docker/redis { inherit pkgs; };
|
||||
docker-traefik = import ./docker/traefik { inherit pkgs; };
|
||||
docker-whoami = import ./docker/whoami { inherit pkgs; };
|
||||
@@ -38,6 +48,9 @@
|
||||
|
||||
obsidian-theme-minimal = import ./obsidian/themes/minimal { inherit pkgs; };
|
||||
|
||||
prometheus-fail2ban-exporter = import ./prometheus-fail2ban-exporter { inherit pkgs; };
|
||||
prometheus-podman-exporter = import ./prometheus-podman-exporter { inherit pkgs; };
|
||||
|
||||
ssh-known-hosts-github = import ./ssh/known-hosts/github { inherit pkgs inputs system; };
|
||||
|
||||
# SAS
|
||||
|
@@ -14,7 +14,7 @@ mkfifo "$LOG_PIPE"
|
||||
|
||||
(
|
||||
while IFS= read -r line; do
|
||||
if echo "$line" | grep -qE "ERROR|FATAL|PANIC"; then
|
||||
if echo "$line" | grep -qEi "ERROR|FATAL|PANIC"; then
|
||||
echo "$line" >&2
|
||||
else
|
||||
echo "$line"
|
||||
|
packages/docker/prometheus-fail2ban-exporter/default.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
{
|
||||
pkgs,
|
||||
inputs,
|
||||
system,
|
||||
...
|
||||
}:
|
||||
let
|
||||
selfPkgs = inputs.self.packages.${system};
|
||||
|
||||
entrypoint = pkgs.writeTextFile {
|
||||
name = "entrypoint";
|
||||
executable = true;
|
||||
destination = "/bin/entrypoint";
|
||||
text = builtins.readFile ./entrypoint.sh;
|
||||
};
|
||||
in
|
||||
pkgs.dockerTools.buildImage {
|
||||
name = "prometheus-fail2ban-exporter";
|
||||
fromImage = import ../base { inherit pkgs; };
|
||||
|
||||
copyToRoot = pkgs.buildEnv {
|
||||
name = "root";
|
||||
paths = [
|
||||
entrypoint
|
||||
selfPkgs.prometheus-fail2ban-exporter
|
||||
];
|
||||
pathsToLink = [ "/bin" ];
|
||||
};
|
||||
|
||||
config = {
|
||||
Entrypoint = [ "/bin/entrypoint" ];
|
||||
ExposedPorts = {
|
||||
"9191/tcp" = { };
|
||||
};
|
||||
};
|
||||
}
|
packages/docker/prometheus-fail2ban-exporter/entrypoint.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
|
||||
mkdir -p /tmp
|
||||
LOG_PIPE="$(mktemp -u)"
|
||||
mkfifo "$LOG_PIPE"
|
||||
|
||||
(
|
||||
while IFS= read -r line; do
|
||||
if echo "$line" | grep -qEi "WARN|ERROR|FATAL"; then
|
||||
echo "$line" >&2
|
||||
else
|
||||
echo "$line"
|
||||
fi
|
||||
done < "$LOG_PIPE"
|
||||
) &
|
||||
LOG_PID=$!
|
||||
|
||||
trap 'kill $LOG_PID' EXIT
|
||||
|
||||
exec /bin/prometheus-fail2ban-exporter "$@" > "$LOG_PIPE" 2>&1
|
packages/docker/prometheus-node-exporter/default.nix (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
{ pkgs, ... }:
|
||||
pkgs.dockerTools.buildImage {
|
||||
name = "prometheus-node-exporter";
|
||||
fromImage = import ../base { inherit pkgs; };
|
||||
|
||||
copyToRoot = pkgs.buildEnv {
|
||||
name = "root";
|
||||
paths = with pkgs; [ prometheus-node-exporter ];
|
||||
pathsToLink = [ "/bin" ];
|
||||
};
|
||||
|
||||
config = {
|
||||
Entrypoint = [ "/bin/node_exporter" ];
|
||||
Cmd = [ "--log.level=warn" ];
|
||||
ExposedPorts = {
|
||||
"9100/tcp" = { };
|
||||
};
|
||||
};
|
||||
}
|
packages/docker/prometheus-podman-exporter/default.nix (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
{
|
||||
pkgs,
|
||||
inputs,
|
||||
system,
|
||||
...
|
||||
}:
|
||||
let
|
||||
selfPkgs = inputs.self.packages.${system};
|
||||
|
||||
entrypoint = pkgs.writeTextFile {
|
||||
name = "entrypoint";
|
||||
executable = true;
|
||||
destination = "/bin/entrypoint";
|
||||
text = builtins.readFile ./entrypoint.sh;
|
||||
};
|
||||
in
|
||||
pkgs.dockerTools.buildImage {
|
||||
name = "prometheus-podman-exporter";
|
||||
fromImage = import ../base { inherit pkgs; };
|
||||
|
||||
copyToRoot = pkgs.buildEnv {
|
||||
name = "root";
|
||||
paths = [
|
||||
entrypoint
|
||||
selfPkgs.prometheus-podman-exporter
|
||||
];
|
||||
pathsToLink = [ "/bin" ];
|
||||
};
|
||||
|
||||
runAsRoot = ''
|
||||
${pkgs.dockerTools.shadowSetup}
|
||||
'';
|
||||
|
||||
config = {
|
||||
Entrypoint = [ "/bin/entrypoint" ];
|
||||
ExposedPorts = {
|
||||
"9882/tcp" = { };
|
||||
};
|
||||
};
|
||||
}
|
packages/docker/prometheus-podman-exporter/entrypoint.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
|
||||
mkdir -p /tmp
|
||||
LOG_PIPE="$(mktemp -u)"
|
||||
mkfifo "$LOG_PIPE"
|
||||
|
||||
(
|
||||
while IFS= read -r line; do
|
||||
if echo "$line" | grep -qEi "WARN|ERROR|FATAL"; then
|
||||
echo "$line" >&2
|
||||
else
|
||||
echo "$line"
|
||||
fi
|
||||
done < "$LOG_PIPE"
|
||||
) &
|
||||
LOG_PID=$!
|
||||
|
||||
trap 'kill $LOG_PID' EXIT
|
||||
|
||||
exec /bin/prometheus-podman-exporter "$@" > "$LOG_PIPE" 2>&1
|
packages/docker/prometheus-smartctl-exporter/default.nix (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
{ pkgs, ... }:
|
||||
pkgs.dockerTools.buildImage {
|
||||
name = "prometheus-smartctl-exporter";
|
||||
fromImage = import ../base { inherit pkgs; };
|
||||
|
||||
copyToRoot = pkgs.buildEnv {
|
||||
name = "root";
|
||||
paths = with pkgs; [ prometheus-smartctl-exporter ];
|
||||
pathsToLink = [ "/bin" ];
|
||||
};
|
||||
|
||||
config = {
|
||||
Entrypoint = [ "/bin/smartctl_exporter" ];
|
||||
Cmd = [ "--log.level=warn" ];
|
||||
ExposedPorts = {
|
||||
"9633/tcp" = { };
|
||||
};
|
||||
};
|
||||
}
|
@@ -14,5 +14,8 @@ pkgs.dockerTools.buildImage {
|
||||
ExposedPorts = {
|
||||
"9090/tcp" = { };
|
||||
};
|
||||
Volumes = {
|
||||
"/var/lib/prometheus" = { };
|
||||
};
|
||||
};
|
||||
}
|
||||
|
packages/prometheus-fail2ban-exporter/default.nix (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
{ pkgs, ... }:
|
||||
# AUTO-UPDATE: nix-update --flake prometheus-fail2ban-exporter
|
||||
pkgs.buildGoModule rec {
|
||||
pname = "prometheus-fail2ban-exporter";
|
||||
version = "0.10.3";
|
||||
|
||||
src = pkgs.fetchFromGitLab {
|
||||
owner = "hctrdev";
|
||||
repo = "fail2ban-prometheus-exporter";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-CyYGY6SovnvgExB22G+LEKRDRzbDZWhWUjctJMkprYs=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-ogdRXbS1EG402qlnj5SfuI/1P/Pi0+xwJrJsc6vwdds=";
|
||||
|
||||
env.CGO_ENABLED = 0;
|
||||
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X main.version=${version}"
|
||||
"-X main.commit=${version}"
|
||||
"-X main.date=1970-01-01T00:00:00Z"
|
||||
"-X main.builtBy=NixOS"
|
||||
];
|
||||
|
||||
installPhase = ''
|
||||
mkdir -p $out/bin
|
||||
cp -r "$GOPATH/bin/fail2ban-prometheus-exporter" $out/bin/prometheus-fail2ban-exporter
|
||||
'';
|
||||
}
|
packages/prometheus-podman-exporter/default.nix (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
{ pkgs, ... }:
|
||||
# AUTO-UPDATE: nix-update --flake prometheus-podman-exporter
|
||||
pkgs.buildGoModule rec {
|
||||
pname = "prometheus-podman-exporter";
|
||||
version = "1.15.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "containers";
|
||||
repo = "prometheus-podman-exporter";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-eXuLiJx0WsPlPAN5ZwQIp89thXiNS6AGE9p3aqjD+K8=";
|
||||
};
|
||||
|
||||
vendorHash = null;
|
||||
|
||||
nativeBuildInputs = with pkgs; [
|
||||
makeWrapper
|
||||
pkg-config
|
||||
];
|
||||
|
||||
buildInputs = with pkgs; [
|
||||
btrfs-progs
|
||||
gpgme
|
||||
];
|
||||
|
||||
tags = [
|
||||
"containers_image_openpgp"
|
||||
"remote"
|
||||
];
|
||||
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X github.com/containers/prometheus-podman-exporter/cmd.buildVersion=${version}"
|
||||
"-X github.com/containers/prometheus-podman-exporter/cmd.buildRevision=${builtins.head (pkgs.lib.strings.splitString "." version)}"
|
||||
"-X github.com/containers/prometheus-podman-exporter/cmd.buildBranch=HEAD"
|
||||
];
|
||||
|
||||
# Don't run tests because they require a running podman daemon
|
||||
doCheck = false;
|
||||
|
||||
postInstall = ''
|
||||
wrapProgram $out/bin/prometheus-podman-exporter \
|
||||
--run '
|
||||
: "''${CONTAINER_HOST:=}"
|
||||
if [ -z "$CONTAINER_HOST" ]; then
|
||||
export CONTAINER_HOST=$([ "$UID" -eq 0 ] && echo "unix:///run/podman/podman.sock" || echo "unix://''${XDG_RUNTIME_DIR:-/run/user/$UID}/podman/podman.sock")
|
||||
fi
|
||||
'
|
||||
'';
|
||||
}
|