holy moly we're almost there

This commit is contained in:
Anish Lakhwara
2022-09-19 08:13:50 +10:00
commit 3693732aac
203 changed files with 17247 additions and 0 deletions
+1
View File
@@ -0,0 +1 @@
{ }
+12
View File
@@ -0,0 +1,12 @@
{ pkgs, ... }:
{
hardware.bluetooth.enable = true;
services.blueman.enable = true;
# A2DP protocol
hardware.bluetooth.settings = {
General = {
Enable = "Source,Sink,Media,Socket";
};
};
}
+34
View File
@@ -0,0 +1,34 @@
{
  # Assumes the remote borg backup server has already been initialized
  # borg init --encryption-key=repokey-blake2 -rsh 'ssh -i /run/keys/id_ed25519_borgbase' 20779@hk-s020.rsync.net:<borg-archive>
  #
  # Both jobs live in a single `services.borgbackup.jobs` attrset: assigning
  # the same attribute path twice in one attribute set is an
  # "attribute already defined" evaluation error in Nix.
  services.borgbackup.jobs = {
    backupTaskwarriorRsync = {
      paths = [ "/var/lib/taskserver" ];
      doInit = true;
      repo = "20779@hk-s020.rsync.net:taskwarrior";
      encryption = {
        mode = "repokey-blake2";
        passCommand = "cat /run/keys/rsync";
      };
      environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_borgbase"; };
      compression = "auto,lzma";
      startAt = "weekly";
      extraArgs = "--remote-path=borg1";
    };
    backupShaarliRsync = {
      paths = [ "/var/www/shaarli-config/data/datastore.php" "/var/www/shaarli-config/config" "/var/www/shaarli-config/.json.config" ];
      doInit = true;
      repo = "20779@hk-s020.rsync.net:shaarli";
      encryption = {
        mode = "repokey-blake2";
        passCommand = "cat /run/keys/rsync";
      };
      environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_borgbase"; };
      compression = "auto,lzma";
      startAt = "weekly";
      extraArgs = "--remote-path=borg1";
    };
  };
}
+11
View File
@@ -0,0 +1,11 @@
# Auto-imports every sibling *.nix file in this directory as a binary-cache
# module, so dropping a new cache definition next to this file activates it.
{ pkgs, lib, ... }:
let
# Directory containing this file.
folder = ./.;
# Map a directory-entry name to its absolute path inside `folder`
# (the entry's type value is unused here).
toImport = name: value: folder + ("/" + name);
# Keep only regular files ending in .nix, excluding this default.nix itself.
filterCaches = key: value: value == "regular" && lib.hasSuffix ".nix" key && key != "default.nix";
imports = lib.mapAttrsToList toImport (lib.filterAttrs filterCaches (builtins.readDir folder));
in
{
inherit imports;
nix.binaryCaches = [ "https://cache.nixos.org/" ];
}
+10
View File
@@ -0,0 +1,10 @@
{
nix = {
binaryCaches = [
"https://nix-community.cachix.org"
];
binaryCachePublicKeys = [
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
];
};
}
+10
View File
@@ -0,0 +1,10 @@
{
nix = {
binaryCaches = [
"https://nrdxp.cachix.org"
];
binaryCachePublicKeys = [
"nrdxp.cachix.org-1:Fc5PSqY2Jm1TrWfm88l6cvGWwz3s93c6IOifQWnhNW4="
];
};
}
+37
View File
@@ -0,0 +1,37 @@
{
  # Unnecessary at this stage
  #services.calibre-web = {
  #  enable = true;
  #  listen.port = 8083;
  #  openFirewall = true;
  #  # Bug in the module puts this in quotes in the systemd file
  #  # user = calibre;
  #  # group = calibre;
  #  options = {
  #    calibreLibrary = "/data/books";
  #    enableBookUploading = true;
  #  };
  #};
  services.calibre-server = {
    enable = true;
    libraries = [ "/data/books" ];
    # Bug in the module puts this in quotes in the systemd file
    # user = calibre;
    # group = calibre;
  };
  # Reverse proxy for the content server. calibre-server listens on its
  # default port 8080 (also the one opened in the firewall below); 8083 was
  # calibre-web's port, which is disabled above, so proxying there served
  # nothing.
  services.nginx.virtualHosts."books.mossnet.lan" = {
    enableACME = false;
    forceSSL = false;
    locations."/" = {
      extraConfig = ''
        proxy_pass http://localhost:8080/;
        proxy_set_header X-Forwarded-Host $host;
      '';
    };
  };
  networking.firewall.allowedTCPPorts = [ 8080 ];
}
+132
View File
@@ -0,0 +1,132 @@
{ config, pkgs, ... }:
{
# imports = [
# ./nvim.nix
# ./unstable.nix
# ];
environment.systemPackages = with pkgs; [
htop
iftop
wget
curl
exa
bat
fd
ag
viu
w3m
ranger
ripgrep
tcpdump
whois
mtr
file
lsof
atool
inotify-tools
strace
zip
unzip
rsync
tmux
pwgen
glow
gitAndTools.gitFull
dig
wpa_supplicant
#neovim defined in ./nvim.nix
#wofi
#firefox
#fractal
#pinentry_gnome
pass
gcc
less
mpv
zathura
#qmk_firmware
python3
gdb
#vcv-rack
#xdotool
#neofetch
#calibre
#openvpn
#gimp
xxd
#tilix
kitty
taskwarrior
gnupg
#openjdk
#glslviewer
#filezilla
#signal-desktop
#wire-desktop
#tdesktop
#feedreader
#newsflash
#syncthing
dijo
#kdeconnect
#ssb-patchwork
#gnome3.gnome-tweaks
#gnome3.pomodoro
#gnomeExtensions.gsconnect
#gnomeExtensions.taskwhisperer
#gnome3.networkmanager-openvpn
#python3Packages.youtube-dl
#appimage-run
#home-manager
libreoffice
fontconfig
#hugo
#processing
#nextcloud-client
#zsh-powerlevel10k
powerline-fonts
#supercollider
haskellPackages.tidal
pandoc
];
#programs.bash.enableCompletion = true;
# Shell aliases for every user. Nix attrsets bind with `name = value;`;
# the previous `"cat": "bat";` (Python/JSON style) is a parse error.
environment.shellAliases = {
  "cat" = "bat";
  "ls" = "exa";
  "grep" = "rg";
};
# needed for vcv-rack
#nixpkgs.config.allowUnfree = true;
programs.gnupg.agent.enable = true;
programs.gnupg.agent.pinentryFlavor = "curses";
programs.gnupg.agent.enableSSHSupport = false;
#system.copySystemConfiguration = true;
#services.syncthing = {
# enable = true;
# openDefaultPorts = true;
# systemService = true;
# user = "anish";
# dataDir = "/home/anish/usr/syncthing";
#};
#zsh powerlevel10k
#programs.zsh.enable = true;
#programs.zsh.promptInit = "source ${pkgs.zsh-powerlevel10k}/share/zsh-powerlevel10k/powerlevel10k.zsh-theme";
#users.users.anish.shell = pkgs.zsh;
#android
programs.adb.enable = true;
users.users.anish.extraGroups = ["adbusers"];
nix.extraOptions = ''
keep-outputs = true
keep-derivations = true
'';
}
+49
View File
@@ -0,0 +1,49 @@
{ self, config, lib, pkgs, ... }:
let inherit (lib) fileContents;
in
{
imports = [ ../cachix ];
nix.systemFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
fonts = {
fonts = with pkgs; [ powerline-fonts dejavu_fonts ];
fontconfig.defaultFonts = {
monospace = [ "DejaVu Sans Mono for Powerline" ];
sansSerif = [ "DejaVu Sans" ];
};
};
nix = {
autoOptimiseStore = true;
gc.automatic = true;
optimise.automatic = true;
useSandbox = true;
allowedUsers = [ "@wheel" ];
trustedUsers = [ "root" "@wheel" ];
extraOptions = ''
min-free = 536870912
keep-outputs = true
keep-derivations = true
fallback = true
'';
};
environment.systemPackages = with pkgs; [
pinentry_gnome
cached-nix-shell
];
services.devmon.enable = true;
# For rage encryption, all hosts need a ssh key pair
services.openssh = {
enable = true;
openFirewall = lib.mkDefault false;
};
programs.gnupg.agent.enable = true;
#programs.gnupg.agent.pinentryFlavor = "curses";
services.earlyoom.enable = true;
}
+92
View File
@@ -0,0 +1,92 @@
[aws]
symbol = " "
[character]
success_symbol = "[](bold purple)"
vicmd_symbol = "[](bold purple)"
[battery]
full_symbol = " "
charging_symbol = " "
discharging_symbol = " "
[conda]
symbol = " "
[directory]
style = "cyan"
read_only = " 🔒"
[docker_context]
symbol = " "
[elixir]
symbol = " "
[elm]
symbol = " "
[git_branch]
format = "[$symbol$branch]($style) "
symbol = " "
style = "bold dimmed white"
[git_status]
format = '([「$all_status$ahead_behind」]($style) )'
conflicted = "⚠️"
ahead = "⟫${count} "
behind = "⟪${count}"
diverged = "🔀 "
untracked = "📁 "
stashed = "↪ "
modified = "𝚫 "
staged = "✔ "
renamed = "⇆ "
deleted = "✘ "
style = "bold bright-white"
[golang]
symbol = " "
[hg_branch]
symbol = " "
[java]
symbol = " "
[julia]
symbol = " "
[memory_usage]
symbol = " "
disabled = false
[nim]
symbol = " "
[nix_shell]
format = '[$symbol$state]($style) '
symbol = " "
pure_msg = "λ"
impure_msg = "⎔"
[nodejs]
symbol = " "
[package]
symbol = " "
[php]
symbol = " "
[python]
symbol = " "
[ruby]
symbol = " "
[rust]
symbol = " "
[status]
disabled = false
+42
View File
@@ -0,0 +1,42 @@
{ pkgs, ... }:
{
  # Polls battery state with acpi and sends desktop notifications for
  # "battery low" (while discharging) and "battery full" (while charging).
  systemd.services.battery-low = {
    path = [
      pkgs.acpi
      pkgs.gawk # I was too lazy to find out where awk lives, it's not coreutils, hopefully it works the same
      pkgs.libnotify
    ];
    # NixOS wraps `script` in a bash file run with `set -e`, so the
    # previous in-script shebang was dead text and any failing command
    # (like `rm` on a missing file) aborted the whole check.
    script = ''
      if [ `acpi -b | grep "Battery 0" | gawk ' { print ($3)}'` == "Discharging," ] ; then
        # Discharging
        # Clear the "already notified full" marker; -f so a missing
        # marker does not abort the script under `set -e`.
        rm -f /tmp/battery-full
        # Monitor for low battery
        if [ `acpi -b | grep "Battery 0" | gawk ' { print ($4)-0}'` -le "15" ] ; then
          notify-send -u critical "Battery Low";
        fi
      else
        # Charging
        if [ `acpi -b | grep "Battery 0" | gawk ' { print ($4)-0}'` -ge "94" ] ; then
          # Fully charged: notify only once per charge cycle. The old test
          # was `-f` (marker exists) — but the marker was only ever created
          # inside that branch, so it never existed and the notification
          # never fired. Notify when the marker is ABSENT, then create it.
          if [[ ! -f /tmp/battery-full ]]; then
            touch /tmp/battery-full;
            notify-send "Battery Full"
          fi
        fi
      fi
    '';
    # Single serviceConfig attrset: `serviceConfig.Type = ...` plus a later
    # `serviceConfig = { ... }` is an "attribute already defined" Nix error.
    serviceConfig = {
      Type = "simple";
      User = "anish";
      Environment = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus";
    };
  };
  # Every five minutes. The former `startAt = "*08:00:00"` used an invalid
  # calendar expression and would have created a second, conflicting timer.
  systemd.timers.battery-low = {
    wantedBy = [ "timers.target" ];
    partOf = [ "battery-low.service" ];
    timerConfig.OnCalendar = [ "*:0/5" ];
  };
}
+201
View File
@@ -0,0 +1,201 @@
{ config, pkgs, lib, ... }:
let
dracula-gtk = pkgs.fetchFromGitHub {
owner = "dracula";
repo = "gtk";
rev = "502f212d83bc67e8f0499574546b99ec6c8e16f9";
sha256 = "1wx9nzq7cqyvpaq4j60bs8g7gh4jk8qg4016yi4c331l4iw1ymsa";
};
in
{
# TODO modularize
imports = [ ./battery-low-timer.nix ];
services = {
gnome.gnome-keyring.enable = true;
upower.enable = true;
dbus = {
enable = true;
packages = [ pkgs.dconf ];
};
};
security.pam.services.sddm.enableGnomeKeyring = true;
environment.sessionVariables = rec {
XDG_CACHE_HOME = "\${HOME}/.cache";
XDG_CONFIG_HOME = "\${HOME}/.config";
XDG_BIN_HOME = "\${HOME}/.local/bin";
XDG_DATA_HOME = "\${HOME}/.local/share";
PATH = [
"\${XDG_BIN_HOME}"
];
};
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
"ripcord"
"vcv-rack"
];
environment.systemPackages = with pkgs; [
signal-desktop # bridge to sealight?
scrot
ripcord
feh
sxiv
xkblayout-state
sublime-music
vcv-rack
zathura
calibre
nheko
mpv
newsflash
zeal
xclip
xdotool
rofi
rofimoji
rofi-calc
eww
obs-studio
lightdm
dunst
libnotify
(polybar.override {
pulseSupport = true;
nlSupport = true;
})
papirus-icon-theme
calendar-cli
wyrd
tootle
];
location.provider = "geoclue2";
services = {
redshift = {
enable = true;
temperature = {
day = 5500;
night = 3700;
};
};
xserver = {
enable = true;
layout = "us,dvorak";
desktopManager.wallpaper.mode = "fill";
displayManager = {
defaultSession = "none+bspwm";
sessionCommands = ''
${pkgs.xorg.xrdb}/bin/xrdb -merge <<EOF
#define blk #1F2430
#define bblk #F28779
#define red #A6CC70
#define bred #FFCC66
#define grn #5CCFE6
#define bgrn #F29E74
#define ylw #77A8D9
#define bylw #5C6773
#define blu #707A8C
#define bblu #F27983
#define mag #BAE67E
#define bmag #FFD580
#define cyn #73D0FF
#define bcyn #FFA759
#define wht #95E6CB
#define bwht #CBCCC6
#define bg blk
#define fg wht
*.foreground: fg
*.background: bg
*.cursorColor: mag
*.color0: blk
*.color8: bblk
*.color1: red
*.color9: bred
*.color2: grn
*.color10: bgrn
*.color3: ylw
*.color11: bylw
*.color4: blu
*.color12: bblu
*.color5: mag
*.color13: bmag
*.color6: cyn
*.color14: bcyn
*.color7: wht
*.color15: bwht
! greys
*.color234: #1E2029
*.color235: #282a36
*.color236: #373844
*.color237: #44475a
*.color239: #565761
*.color240: #6272a4
*.color241: #b6b6b2
EOF
# hotplug
connect() {
xrandr --output HDMI-2 --same-as eDP-1
}
disconnect() {
xrandr --output HDMI-2 --off
}
xrandr | grep "HDMI-2 connected" &>>/dev/null && connect || disconnect
# keyboard on curve is busted
get_keyboard_id() {
xinput list | grep 'AT Translated Set' | cut -f2 | cut -d'=' -f2 | xinput float
}
disconnect_keyboard() {
id=$(get_keyboard_id)
xinput float $id
unset id
}
attach_keyboard() {
id=$(get_keyboard_id)
xinput reattach $id 3
}
disconnect_keyboard
'';
# lightdm.enable = true;
# lightdm.greeters.mini.enable = true;
# lightdm.greeters.mini.user = "anish"; # TODO hardcoded
# lightdm.background = "/etc/nixos/users/profiles/desktop/background.jpg";
# lightdm.greeters.mini.extraConfig = ''
# text-color = "#ff79c6"
# password-background-color = "#1E2029"
# window-color = "#181a23"
# border-color = "#181a23"
# '';
};
windowManager.bspwm.enable = true;
#windowManager.bspwm.configFile = "/home/anish/.bspwm/bspwmrc";
windowManager.bspwm.sxhkd.configFile = "/home/anish/.config/sxhkdrc";
};
};
fonts.fonts = with pkgs; [
fira-code
fira-code-symbols
hermit
#hack
siji
font-awesome
proggyfonts
(nerdfonts.override { fonts = [ "FiraCode" "DroidSansMono" "Iosevka" ]; })
];
}
+32
View File
@@ -0,0 +1,32 @@
{ pkgs, ... }:
{
  # TODO
  # scrappy script that requires dhyan be cloned to /home/anish/usr/dhyan
  # dhyan needs secrets, and internet access to install dependencies, havent figured that out with nix yet
  # dhyan also depends on jdk11, but that's only to install dependencies, run `bb -m dhyan.main` once before installing this script
  # If you've updated dhyan, you'll want to cd to /home/anish/usr/dhyan and run git pull
  systemd.services.dhyan = {
    path = [
      pkgs.babashka
      pkgs.git
      pkgs.jdk11
      pkgs.curl
    ];
    script = ''
      cd /home/anish/usr/dhyan
      source .envrc
      bb -m dhyan.main
    '';
    # Single serviceConfig attrset: `serviceConfig.Type = ...` plus a later
    # `serviceConfig = { ... }` is an "attribute already defined" Nix error.
    serviceConfig = {
      Type = "oneshot";
      User = "anish";
    };
  };
  # Daily at 08:00. The former `startAt = "*08:00:00"` used an invalid
  # calendar expression and duplicated this explicit timer.
  systemd.timers.dhyan = {
    wantedBy = [ "timers.target" ];
    # Was "kitaab-sync.service" — a copy/paste slip; this timer belongs to
    # the dhyan service it triggers.
    partOf = [ "dhyan.service" ];
    timerConfig.OnCalendar = [ "08:00:00" ];
  };
}
+16
View File
@@ -0,0 +1,16 @@
{ config, pkgs, lib, ... }:
{
services.dnsmasq.enable = true;
services.dnsmasq.extraConfig = ''
domain-needed
no-resolv
local=/lan/
local=/moss/
cache-size=5000
addn-hosts=/etc/adblock.hosts
''; # find a way to make adblock hosts reproducible and updateable
services.dnsmasq.servers = [ "45.90.30.49" "45.90.28.49" "1.1.1.1" "8.8.8.8" ];
networking.hosts = { "192.168.1.240" = [ "mossnet.lan" "links.mossnet.lan" "read.mossnet.lan" "stats.mossnet.lan" "music.mossnet.lan" "rss.mossnet.lan" "tasks.mossnet.lan" "file.mossnet.lan" "books.mossnet.lan" ]; };
networking.firewall.allowedTCPPorts = [ 53 ];
networking.firewall.allowedUDPPorts = [ 53 ];
}
+20
View File
@@ -0,0 +1,20 @@
{ pkgs, lib, ... }:
{
networking.firewall.allowedTCPPorts = [ 20 21 ];
networking.firewall.allowedTCPPortRanges = [{ from = 51000; to = 51999; }];
services.vsftpd = {
enable = true;
writeEnable = true;
anonymousUser = true;
anonymousUserNoPassword = true;
anonymousUploadEnable = true;
anonymousMkdirEnable = true;
anonymousUserHome = "/home/ftp";
extraConfig = ''
pasv_enable=Yes
pasv_min_port=51000
pasv_max_port=51999
'';
};
}
+59
View File
@@ -0,0 +1,59 @@
{ config, lib, pkgs, ... }:
{
services.gitea = {
enable = true;
appName = "Sealight Git Forge";
domain = "git.sealight.xyz";
rootUrl = "https://git.sealight.xyz";
httpPort = 3001;
database = {
type = "postgres";
# passwordFile = "/run/secrets/gitea-dbpass"; # TODO supplied by agenix
password = "somethingunknowablesorry";
};
settings = {
metrics = {
ENABLED = true;
};
repository = {
DEFAULT_BRANCH = "main";
};
};
};
services.postgresql = {
enable = true; # Ensure postgresql is enabled
authentication = ''
local gitea all ident map=gitea-users
'';
identMap = # Map the gitea user to postgresql
''
gitea-users gitea gitea
'';
# ensureDatabases = [ "gitea" ];
ensureUsers = [
{ name = "gitea"; ensurePermissions."DATABASE gitea" = "ALL PRIVILEGES"; }
];
# TODO
# initialScript # set password for gitea user
};
services.nginx = {
enable = true; # Enable Nginx
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
virtualHosts."git.sealight.xyz" = {
# Gitea hostname
enableACME = true; # Use ACME certs
forceSSL = true; # Force SSL
locations."/".proxyPass = "http://localhost:3001/"; # Proxy Gitea
};
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
security.acme.defaults.email = "anish+acme@lakhwara.com";
security.acme.acceptTerms = true;
}
+12
View File
@@ -0,0 +1,12 @@
{ config, lib, pkgs, ... }:
{
services.gonic.enable = true;
services.gonic.settings = ''
music-path /mnt/two/music/
podcast-path /data/podcasts
cache-path /data/cache
'';
services.gonic.group = "audio";
services.gonic.user = "headphones";
networking.firewall.allowedTCPPorts = [ 4747 ];
}
+110
View File
@@ -0,0 +1,110 @@
# NixOS module wrapping the Gonic music server, plus a local nginx vhost
# in front of it.
{ config, lib, pkgs, ... }:
with lib;
let
  cfg = config.services.gonic;
  configFile = "/etc/gonic/config";
  dataFolder = "/var/lib/gonic";
in
{
  options = {
    services.gonic = {
      enable = mkEnableOption "Gonic music server and streamer";
      settings = lib.mkOption {
        type = types.str;
        # The type is a string, so the default must be a string; the
        # previous `default = { }` was a type error whenever the option
        # was left unset.
        default = "";
        example = literalExample ''
          music-path <path to your music dir>
          podcast-path <path to your podcasts dir>
          cache-path <path to cache dir>
        '';
        description = ''
          Configuration for Gonic, see <link xlink:href="https://github.com/sentriz/gonic"/> for supported values.
        '';
      };
      user = mkOption {
        type = types.str;
        default = "gonic";
        description = "User account under which gonic runs.";
      };
      group = mkOption {
        type = types.str;
        default = "gonic";
        description = "Group account under which gonic runs.";
      };
    };
  };
  config = mkIf cfg.enable {
    # Raw key/value config consumed via -config-path below.
    environment.etc."gonic/config".text = cfg.settings;
    systemd.services.gonic = {
      description = "gonic Music Server and Streamer compatible with Subsonic/Airsonic";
      after = [ "remote-fs.target" "network.target" ];
      wantedBy = [ "multi-user.target" ];
      environment = {
        #GONIC_MUSIC_PATH
        #GONIC_PODCAST_PATH
        #GONIC_CACHE_PATH
        #GONIC_DB_PATH
        GONIC_SCAN_INTERVAL = "800";
        #...
      };
      serviceConfig = {
        ExecStart = "${pkgs.gonic}/bin/gonic -config-path /etc/gonic/config";
        WorkingDirectory = dataFolder;
        TimeoutStopSec = "20";
        KillMode = "process";
        Restart = "on-failure";
        RestartSec = "10";
        User = cfg.user;
        Group = cfg.group;
        # Hardening.
        DevicePolicy = "closed";
        # Was " yes" with a stray leading space, which systemd would not
        # parse as the boolean "yes".
        NoNewPrivileges = "yes";
        PrivateTmp = "yes";
        PrivateUsers = "yes";
        ProtectControlGroups = "yes";
        ProtectKernelModules = "yes";
        ProtectKernelTunables = "yes";
        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
        RestrictNamespaces = "yes";
        RestrictRealtime = "yes";
        SystemCallFilter = "~@clock @debug @module @mount @obsolete @privileged @reboot @setuid @swap";
        ReadWritePaths = dataFolder;
        StateDirectory = baseNameOf dataFolder;
      };
    };
    # Create the default user/group only when the operator kept the defaults.
    users.users = optionalAttrs (cfg.user == "gonic") ({
      gonic = {
        description = "gonic service user";
        name = cfg.user;
        group = cfg.group;
        isSystemUser = true;
      };
    });
    users.groups = optionalAttrs (cfg.group == "gonic") ({
      gonic = { };
    });
    services.nginx.virtualHosts."music.mossnet.lan" = {
      enableACME = false;
      forceSSL = false;
      locations."/" = {
        extraConfig = ''
          proxy_pass http://localhost:4747/;
          proxy_set_header X-Forwarded-Host $host;
        '';
      };
    };
  };
}
+6
View File
@@ -0,0 +1,6 @@
{ ... }:
{
  # Grasp note-capture service, writing into the kitaab repo as user anish.
  services.grasp = {
    enable = true;
    path = "/home/anish/kitaab/grasp";
    user = "anish";
  };
}
+12
View File
@@ -0,0 +1,12 @@
{ config, lib, pkgs, ... }:
{
services.headphones = {
enable = true;
host = "192.168.1.240";
port = 8181;
user = "headphones";
group = "audio";
dataDir = "/data/music";
};
networking.firewall.allowedTCPPorts = [ 8181 ];
}
+54
View File
@@ -0,0 +1,54 @@
{ pkgs, ... }:
let
adblockLocalZones = pkgs.stdenv.mkDerivation {
name = "unbound-zones-adblock";
src = (pkgs.fetchFromGitHub {
owner = "StevenBlack";
repo = "hosts";
rev = "3.0.0";
sha256 = "01g6pc9s1ah2w1cbf6bvi424762hkbpbgja9585a0w99cq0n6bxv";
} + "/hosts");
phases = [ "installPhase" ];
installPhase = ''
${pkgs.gawk}/bin/awk '{sub(/\r$/,"")} {sub(/^127\.0\.0\.1/,"0.0.0.0")} BEGIN { OFS = "" } NF == 2 && $1 == "0.0.0.0" { print "local-zone: \"", $2, "\" static"}' $src | tr '[:upper:]' '[:lower:]' | sort -u > $out
'';
};
in {
networking.firewall.allowedTCPPorts = [ 53 853 5353 ];
networking.firewall.allowedUDPPorts = [ 53 853 5353 ];
networking.extraHosts = '' 127.0.0.1 helix.domain '';
# These are causing errors, goes in the server file
# local-data = "mossnet.lan. 10800 IN A 10.0.69.4";
services.unbound = {
enable = true;
settings = {
server = {
interface = [ "0.0.0.0" ];
tls-upstream = true;
tls-cert-bundle = "/etc/ssl/certs/ca-certificates.crt";
include = "${adblockLocalZones}";
access-control = "0.0.0.0/0 allow";
verbosity = 2;
do-tcp = true;
do-ip4 = true;
prefetch = true;
prefetch-key = true;
rrset-roundrobin = true;
qname-minimisation-strict = true;
hide-identity = true;
hide-version = true;
local-zone = "mossnet.lan. static";
};
forward-zone = [{
name = ".";
forward-addr = [ "45.90.28.77#c46fd3.dns1.nextdns.io" "2a07:a8c0::#c46fd3.dns1.nextdns.io" "45.90.30.77#c46fd3.dns2.nextdns.io" "2a07:a8c1::#c46fd3.dns2.nextdns.io" ];
}];
remote-control.control-enable = true;
};
};
}
+82
View File
@@ -0,0 +1,82 @@
# SPDX-License-Identifier: Apache-2.0
{ config, pkgs, ... }:
let
# https://github.com/StevenBlack/hosts/issues/451
# https://github.com/ScriptTiger/Hosts-Conversions
# https://github.com/ScriptTiger/scripttiger.github.io
# https://scripttiger.github.io/alts/
adblockLocalZones = pkgs.stdenv.mkDerivation {
name = "adblock-rpz";
src = (pkgs.fetchFromGitHub {
owner = "ScriptTiger";
repo = "scripttiger.github.io";
# nix-prefetch-github ScriptTiger scripttiger.github.io
rev = "04402a6726f97c5f0d30436a70ac1344dccb7cf1";
sha256 = "iSTR7j7QEr5xYtImyntDlVLbnN2ipcLcTRr4sfdx078=";
} + "/alts/rpz/blacklist.txt");
phases = [ "installPhase" ];
installPhase = "install -m 444 -D $src $out";
};
domain = "sealight.xyz";
certdir = config.security.acme.certs.${domain}.directory;
in {
networking.firewall.allowedTCPPorts = [ 853 ];
networking.firewall.allowedUDPPorts = [ 853 ];
# for acme certs, TODO: make a proper group
users.users.knot-resolver.extraGroups = [ "nginx" ];
# https://github.com/NixOS/nixpkgs/issues/81109
nixpkgs.config.packageOverrides = pkgs: rec {
knot-resolver = pkgs.knot-resolver.override { extraFeatures = true; };
};
services.kresd = {
enable = true;
# Plain DNS only from localhost.
# You might want to add a LAN or VPN subnet, depending on deployment.
#listenPlain = [ "127.0.0.1:53" "[::1]:53" ];
listenTLS = [ "853" ];
#listenDoH = [ "" "[::]:443" ];
instances = 1;
# TODO: chain.pem for stapling: https://gitlab.nic.cz/knot/knot-resolver/-/issues/517
#policy.add(
# policy.rpz(
# policy.DENY_MSG('domain blocked by your resolver operator'),
# '${adblockLocalZones}',
# false))
extraConfig = ''
modules.load('predict')
modules = { 'hints > iterate' }
policy.add(
policy.all(
policy.TLS_FORWARD({
{'45.90.30.0', hostname='87b4e9.dns2.nextdns.io'},
{'45.90.28.0', hostname='87b4e9.dns1.nextdns.io'}
})))
policy.add(policy.suffix(policy.FORWARD({'10.0.69.4', '172.16.11.240'}), {todname('mossnet.lan')}))
net.tls("${certdir}/fullchain.pem", "${certdir}/key.pem")
-- refresh certs when ACME updates them
-- TODO: could probably do with systemd and a control socket?
-- TODO: may fail on first boot if ACME isn't fast, add a systemd dep
local notify = require('cqueues.notify')
local watcher = notify.opendir('${certdir}')
watcher:add('fullchain.pem')
worker.coroutine(function ()
for flags, name in watcher:changes() do
for flag in notify.flags(flags) do
print('file watcher:', name, notify[flag])
end
if name == 'fullchain.pem' then
net.tls("${certdir}/fullchain.pem", "${certdir}/key.pem")
end
end
end)
-- Below we create a tmpfs FS for the cache -- use almost all of it:
cache.size = cache.fssize() - 1*MB
'';
};
}
+17
View File
@@ -0,0 +1,17 @@
# This file has been generated by node2nix 1.9.0. Do not edit!
{pkgs ? import <nixpkgs> {
inherit system;
}, system ? builtins.currentSystem, nodejs ? pkgs."nodejs-12_x"}:
let
nodeEnv = import ./node-env.nix {
inherit (pkgs) stdenv lib python2 runCommand writeTextFile writeShellScript;
inherit pkgs nodejs;
libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null;
};
in
import ./node-packages.nix {
inherit (pkgs) fetchurl nix-gitignore stdenv lib fetchgit;
inherit nodeEnv;
}
+588
View File
@@ -0,0 +1,588 @@
# This file originates from node2nix
{lib, stdenv, nodejs, python2, pkgs, libtool, runCommand, writeTextFile, writeShellScript}:
let
# Workaround to cope with utillinux in Nixpkgs 20.09 and util-linux in Nixpkgs master
utillinux = if pkgs ? utillinux then pkgs.utillinux else pkgs.util-linux;
python = if nodejs ? python then nodejs.python else python2;
# Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise
tarWrapper = runCommand "tarWrapper" {} ''
mkdir -p $out/bin
cat > $out/bin/tar <<EOF
#! ${stdenv.shell} -e
$(type -p tar) "\$@" --warning=no-unknown-keyword --delay-directory-restore
EOF
chmod +x $out/bin/tar
'';
# Function that generates a TGZ file from a NPM project
buildNodeSourceDist =
{ name, version, src, ... }:
stdenv.mkDerivation {
name = "node-tarball-${name}-${version}";
inherit src;
buildInputs = [ nodejs ];
buildPhase = ''
export HOME=$TMPDIR
tgzFile=$(npm pack | tail -n 1) # Hooks to the pack command will add output (https://docs.npmjs.com/misc/scripts)
'';
installPhase = ''
mkdir -p $out/tarballs
mv $tgzFile $out/tarballs
mkdir -p $out/nix-support
echo "file source-dist $out/tarballs/$tgzFile" >> $out/nix-support/hydra-build-products
'';
};
# Common shell logic
installPackage = writeShellScript "install-package" ''
installPackage() {
local packageName=$1 src=$2
local strippedName
local DIR=$PWD
cd $TMPDIR
unpackFile $src
# Make the base dir in which the target dependency resides first
mkdir -p "$(dirname "$DIR/$packageName")"
if [ -f "$src" ]
then
# Figure out what directory has been unpacked
packageDir="$(find . -maxdepth 1 -type d | tail -1)"
# Restore write permissions to make building work
find "$packageDir" -type d -exec chmod u+x {} \;
chmod -R u+w "$packageDir"
# Move the extracted tarball into the output folder
mv "$packageDir" "$DIR/$packageName"
elif [ -d "$src" ]
then
# Get a stripped name (without hash) of the source directory.
# On old nixpkgs it's already set internally.
if [ -z "$strippedName" ]
then
strippedName="$(stripHash $src)"
fi
# Restore write permissions to make building work
chmod -R u+w "$strippedName"
# Move the extracted directory into the output folder
mv "$strippedName" "$DIR/$packageName"
fi
# Change to the package directory to install dependencies
cd "$DIR/$packageName"
}
'';
# Bundle the dependencies of the package
#
# Only include dependencies if they don't exist. They may also be bundled in the package.
includeDependencies = {dependencies}:
lib.optionalString (dependencies != []) (
''
mkdir -p node_modules
cd node_modules
''
+ (lib.concatMapStrings (dependency:
''
if [ ! -e "${dependency.name}" ]; then
${composePackage dependency}
fi
''
) dependencies)
+ ''
cd ..
''
);
# Recursively composes the dependencies of a package
composePackage = { name, packageName, src, dependencies ? [], ... }@args:
builtins.addErrorContext "while evaluating node package '${packageName}'" ''
installPackage "${packageName}" "${src}"
${includeDependencies { inherit dependencies; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
'';
pinpointDependencies = {dependencies, production}:
let
pinpointDependenciesFromPackageJSON = writeTextFile {
name = "pinpointDependencies.js";
text = ''
var fs = require('fs');
var path = require('path');
function resolveDependencyVersion(location, name) {
if(location == process.env['NIX_STORE']) {
return null;
} else {
var dependencyPackageJSON = path.join(location, "node_modules", name, "package.json");
if(fs.existsSync(dependencyPackageJSON)) {
var dependencyPackageObj = JSON.parse(fs.readFileSync(dependencyPackageJSON));
if(dependencyPackageObj.name == name) {
return dependencyPackageObj.version;
}
} else {
return resolveDependencyVersion(path.resolve(location, ".."), name);
}
}
}
function replaceDependencies(dependencies) {
if(typeof dependencies == "object" && dependencies !== null) {
for(var dependency in dependencies) {
var resolvedVersion = resolveDependencyVersion(process.cwd(), dependency);
if(resolvedVersion === null) {
process.stderr.write("WARNING: cannot pinpoint dependency: "+dependency+", context: "+process.cwd()+"\n");
} else {
dependencies[dependency] = resolvedVersion;
}
}
}
}
/* Read the package.json configuration */
var packageObj = JSON.parse(fs.readFileSync('./package.json'));
/* Pinpoint all dependencies */
replaceDependencies(packageObj.dependencies);
if(process.argv[2] == "development") {
replaceDependencies(packageObj.devDependencies);
}
replaceDependencies(packageObj.optionalDependencies);
/* Write the fixed package.json file */
fs.writeFileSync("package.json", JSON.stringify(packageObj, null, 2));
'';
};
in
''
node ${pinpointDependenciesFromPackageJSON} ${if production then "production" else "development"}
${lib.optionalString (dependencies != [])
''
if [ -d node_modules ]
then
cd node_modules
${lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies}
cd ..
fi
''}
'';
# Recursively traverses all dependencies of a package and pinpoints all
# dependencies in the package.json file to the versions that are actually
# being used.
pinpointDependenciesOfPackage = { packageName, dependencies ? [], production ? true, ... }@args:
''
if [ -d "${packageName}" ]
then
cd "${packageName}"
${pinpointDependencies { inherit dependencies production; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
fi
'';
# Extract the Node.js source code which is used to compile packages with
# native bindings
nodeSources = runCommand "node-sources" {} ''
tar --no-same-owner --no-same-permissions -xf ${nodejs.src}
mv node-* $out
'';
# Script that adds _integrity fields to all package.json files to prevent NPM from consulting the cache (that is empty)
addIntegrityFieldsScript = writeTextFile {
name = "addintegrityfields.js";
text = ''
var fs = require('fs');
var path = require('path');
function augmentDependencies(baseDir, dependencies) {
for(var dependencyName in dependencies) {
var dependency = dependencies[dependencyName];
// Open package.json and augment metadata fields
var packageJSONDir = path.join(baseDir, "node_modules", dependencyName);
var packageJSONPath = path.join(packageJSONDir, "package.json");
if(fs.existsSync(packageJSONPath)) { // Only augment packages that exist. Sometimes we may have production installs in which development dependencies can be ignored
console.log("Adding metadata fields to: "+packageJSONPath);
var packageObj = JSON.parse(fs.readFileSync(packageJSONPath));
if(dependency.integrity) {
packageObj["_integrity"] = dependency.integrity;
} else {
packageObj["_integrity"] = "sha1-000000000000000000000000000="; // When no _integrity string has been provided (e.g. by Git dependencies), add a dummy one. It does not seem to harm and it bypasses downloads.
}
if(dependency.resolved) {
packageObj["_resolved"] = dependency.resolved; // Adopt the resolved property if one has been provided
} else {
packageObj["_resolved"] = dependency.version; // Set the resolved version to the version identifier. This prevents NPM from cloning Git repositories.
}
if(dependency.from !== undefined) { // Adopt from property if one has been provided
packageObj["_from"] = dependency.from;
}
fs.writeFileSync(packageJSONPath, JSON.stringify(packageObj, null, 2));
}
// Augment transitive dependencies
if(dependency.dependencies !== undefined) {
augmentDependencies(packageJSONDir, dependency.dependencies);
}
}
}
if(fs.existsSync("./package-lock.json")) {
var packageLock = JSON.parse(fs.readFileSync("./package-lock.json"));
if(![1, 2].includes(packageLock.lockfileVersion)) {
process.stderr.write("Sorry, I only understand lock file versions 1 and 2!\n");
process.exit(1);
}
if(packageLock.dependencies !== undefined) {
augmentDependencies(".", packageLock.dependencies);
}
}
'';
};
# Reconstructs a package-lock file from the node_modules/ folder structure and package.json files with dummy sha1 hashes
reconstructPackageLock = writeTextFile {
# Renamed from "addintegrityfields.js" (copy/paste slip from the script above);
# only affects the derivation's store-path name.
name = "reconstructpackagelock.js";
text = ''
var fs = require('fs');
var path = require('path');
var packageObj = JSON.parse(fs.readFileSync("package.json"));
// Skeleton v1 lock file. Dependencies are filled in below with dummy
// integrity hashes; real hashes are patched in later by the
// addintegrityfields script.
var lockObj = {
name: packageObj.name,
version: packageObj.version,
lockfileVersion: 1,
requires: true,
dependencies: {}
};
// Record filePath's package in `dependencies`, then recurse into its own
// nested node_modules/ folder.
function augmentPackageJSON(filePath, dependencies) {
var packageJSON = path.join(filePath, "package.json");
if(fs.existsSync(packageJSON)) {
var packageObj = JSON.parse(fs.readFileSync(packageJSON));
dependencies[packageObj.name] = {
version: packageObj.version,
integrity: "sha1-000000000000000000000000000=",
dependencies: {}
};
processDependencies(path.join(filePath, "node_modules"), dependencies[packageObj.name].dependencies);
}
}
function processDependencies(dir, dependencies) {
if(fs.existsSync(dir)) {
var files = fs.readdirSync(dir);
files.forEach(function(entry) {
var filePath = path.join(dir, entry);
var stats = fs.statSync(filePath);
if(stats.isDirectory()) {
if(entry.substr(0, 1) == "@") {
// When we encounter a namespace folder, augment all packages belonging to the scope
var pkgFiles = fs.readdirSync(filePath);
pkgFiles.forEach(function(entry) {
var pkgFilePath = path.join(filePath, entry);
// Fix: stat the scoped entry itself. The original tested the outer
// `stats` (the @scope folder, always a directory), so plain files
// inside a scope folder were wrongly treated as packages.
if(fs.statSync(pkgFilePath).isDirectory()) {
augmentPackageJSON(pkgFilePath, dependencies);
}
});
} else {
augmentPackageJSON(filePath, dependencies);
}
}
});
}
}
processDependencies("node_modules", lockObj.dependencies);
fs.writeFileSync("package-lock.json", JSON.stringify(lockObj, null, 2));
'';
};
# Shell fragment that pinpoints dependency versions and then drives
# `npm rebuild` / `npm install` against the Nix-provided node_modules.
# With bypassCache we run NPM fully offline; otherwise the registry is
# pointed at a dead host so any attempt to download a dependency that
# Nix did not provide fails loudly instead of silently fetching.
prepareAndInvokeNPM = {packageName, bypassCache, reconstructLock, npmFlags, production}:
let
# example.com is unroutable for package data: a poor-man's offline mode.
forceOfflineFlag = if bypassCache then "--offline" else "--registry http://www.example.com";
in
''
# Pinpoint the versions of all dependencies to the ones that are actually being used
echo "pinpointing versions of dependencies..."
source $pinpointDependenciesScriptPath
# Patch the shebangs of the bundled modules to prevent them from
# calling executables outside the Nix store as much as possible
patchShebangs .
# Deploy the Node.js package by running npm install. Since the
# dependencies have been provided already by ourselves, it should not
# attempt to install them again, which is good, because we want to make
# it Nix's responsibility. If it needs to install any dependencies
# anyway (e.g. because the dependency parameters are
# incomplete/incorrect), it fails.
#
# The other responsibilities of NPM are kept -- version checks, build
# steps, postprocessing etc.
export HOME=$TMPDIR
cd "${packageName}"
runHook preRebuild
${lib.optionalString bypassCache ''
${lib.optionalString reconstructLock ''
if [ -f package-lock.json ]
then
echo "WARNING: Reconstruct lock option enabled, but a lock file already exists!"
echo "This will most likely result in version mismatches! We will remove the lock file and regenerate it!"
rm package-lock.json
else
echo "No package-lock.json file found, reconstructing..."
fi
node ${reconstructPackageLock}
''}
node ${addIntegrityFieldsScript}
''}
npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} rebuild
if [ "''${dontNpmInstall-}" != "1" ]
then
# NPM tries to download packages even when they already exist if npm-shrinkwrap is used.
rm -f npm-shrinkwrap.json
npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} install
fi
'';
# Builds and composes an NPM package including all its dependencies
buildNodePackage =
{ name
, packageName
, version
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, preRebuild ? ""
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, meta ? {}
, ... }@args:
let
# Attributes consumed here are stripped; everything else in `args` is
# forwarded verbatim to mkDerivation at the bottom.
extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "preRebuild" "unpackPhase" "buildPhase" "meta" ];
in
stdenv.mkDerivation ({
name = "${name}-${version}";
buildInputs = [ tarWrapper python nodejs ]
++ lib.optional (stdenv.isLinux) utillinux
++ lib.optional (stdenv.isDarwin) libtool
++ buildInputs;
inherit nodejs;
inherit dontStrip; # Stripping may fail a build for some package deployments
inherit dontNpmInstall preRebuild unpackPhase buildPhase;
# The composition / pinpoint scripts are passed as files (passAsFile) so
# large dependency trees do not blow past environment-size limits.
compositionScript = composePackage args;
pinpointDependenciesScript = pinpointDependenciesOfPackage args;
passAsFile = [ "compositionScript" "pinpointDependenciesScript" ];
installPhase = ''
source ${installPackage}
# Create and enter a root node_modules/ folder
mkdir -p $out/lib/node_modules
cd $out/lib/node_modules
# Compose the package and all its dependencies
source $compositionScriptPath
${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}
# Create symlink to the deployed executable folder, if applicable
if [ -d "$out/lib/node_modules/.bin" ]
then
ln -s $out/lib/node_modules/.bin $out/bin
fi
# Create symlinks to the deployed manual page folders, if applicable
if [ -d "$out/lib/node_modules/${packageName}/man" ]
then
mkdir -p $out/share
for dir in "$out/lib/node_modules/${packageName}/man/"*
do
mkdir -p $out/share/man/$(basename "$dir")
for page in "$dir"/*
do
ln -s $page $out/share/man/$(basename "$dir")
done
done
fi
# Run post install hook, if provided
runHook postInstall
'';
meta = {
# default to Node.js' platforms
platforms = nodejs.meta.platforms;
} // meta;
} // extraArgs);
# Builds a node environment (a node_modules folder and a set of binaries)
buildNodeDependencies =
{ name
, packageName
, version
, src
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, ... }@args:
let
# Forward all unconsumed attributes to mkDerivation.
extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" ];
in
stdenv.mkDerivation ({
name = "node-dependencies-${name}-${version}";
buildInputs = [ tarWrapper python nodejs ]
++ lib.optional (stdenv.isLinux) utillinux
++ lib.optional (stdenv.isDarwin) libtool
++ buildInputs;
inherit dontStrip; # Stripping may fail a build for some package deployments
inherit dontNpmInstall unpackPhase buildPhase;
includeScript = includeDependencies { inherit dependencies; };
pinpointDependenciesScript = pinpointDependenciesOfPackage args;
passAsFile = [ "includeScript" "pinpointDependenciesScript" ];
installPhase = ''
source ${installPackage}
mkdir -p $out/${packageName}
cd $out/${packageName}
source $includeScriptPath
# Create fake package.json to make the npm commands work properly
cp ${src}/package.json .
chmod 644 package.json
${lib.optionalString bypassCache ''
if [ -f ${src}/package-lock.json ]
then
cp ${src}/package-lock.json .
fi
''}
# Go to the parent folder to make sure that all packages are pinpointed
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}
# Expose the executables that were installed
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
mv ${packageName} lib
ln -s $out/lib/node_modules/.bin $out/bin
'';
} // extraArgs);
# Builds a development shell
# Wraps buildNodeDependencies: the dependency closure is built as its own
# derivation and exposed to the shell via NODE_PATH/PATH in shellHook.
buildNodeShell =
{ name
, packageName
, version
, src
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, ... }@args:
let
nodeDependencies = buildNodeDependencies args;
in
stdenv.mkDerivation {
name = "node-shell-${name}-${version}";
buildInputs = [ python nodejs ] ++ lib.optional (stdenv.isLinux) utillinux ++ buildInputs;
# The output is a tiny launcher script that re-execs an interactive shell
# after the shellHook has exported the environment.
buildCommand = ''
mkdir -p $out/bin
cat > $out/bin/shell <<EOF
#! ${stdenv.shell} -e
$shellHook
exec ${stdenv.shell}
EOF
chmod +x $out/bin/shell
'';
# Provide the dependencies in a development shell through the NODE_PATH environment variable
inherit nodeDependencies;
shellHook = lib.optionalString (dependencies != []) ''
export NODE_PATH=${nodeDependencies}/lib/node_modules
export PATH="${nodeDependencies}/bin:$PATH"
'';
};
in
# Public interface of this node environment: each builder is exposed as an
# overridable function so consumers can tweak arguments via .override.
{
buildNodeSourceDist = lib.makeOverridable buildNodeSourceDist;
buildNodePackage = lib.makeOverridable buildNodePackage;
buildNodeDependencies = lib.makeOverridable buildNodeDependencies;
buildNodeShell = lib.makeOverridable buildNodeShell;
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,48 @@
# Package for the mx-puppet-slack Matrix<->Slack bridge, built from a pinned
# git revision via a node2nix-generated composition (node-composition.nix).
{ stdenv, fetchFromGitHub, pkgs, lib, nodejs, nodePackages, pkg-config, libjpeg, pixman, cairo, pango }:
let
# No official version ever released
src = fetchFromGitHub {
owner = "Sorunome";
repo = "mx-puppet-slack";
rev = "691e53d2d703bd169e1f23a8d8dff3f008d8c4ef";
sha256 = "1yczhfpa4qzvijcpgc2pr10s009qb6jwlfwpcbb17g2wsx6zj0c2";
};
myNodePackages = import ./node-composition.nix {
inherit pkgs nodejs;
inherit (stdenv.hostPlatform) system;
};
in myNodePackages.package.override {
pname = "mx-puppet-slack";
inherit src;
# Native libs are needed by the `canvas` dependency (libjpeg/pixman/cairo/pango).
nativeBuildInputs = [ nodePackages.node-pre-gyp pkg-config ];
buildInputs = [ libjpeg pixman cairo pango ];
postInstall = ''
# Patch shebangs in node_modules, otherwise the webpack build fails with interpreter problems
patchShebangs --build "$out/lib/node_modules/mx-puppet-slack/node_modules/"
# compile Typescript sources
npm run build
# Make an executable to run the server
mkdir -p $out/bin
cat <<EOF > $out/bin/mx-puppet-slack
#!/bin/sh
exec ${nodejs}/bin/node $out/lib/node_modules/mx-puppet-slack/build/index.js "\$@"
EOF
chmod +x $out/bin/mx-puppet-slack
'';
meta = with lib; {
description = "A slack puppeting bridge for matrix";
license = licenses.asl20;
homepage = "https://github.com/Sorunome/mx-puppet-slack";
maintainers = with maintainers; [ chickensoupwithrice ];
platforms = platforms.unix;
};
}
+378
View File
@@ -0,0 +1,378 @@
# Host configuration for the sealight.xyz Matrix stack: synapse, coturn (TURN),
# postgres, nginx vhosts, element-web, and (mostly disabled) bridge services.
# NOTE(review): several shared secrets/passwords are written inline here (and
# thus end up world-readable in the Nix store) — the existing TODO agenix
# markers should be acted on.
{ config, lib, pkgs, ... }:
{
services.matrix-synapse = {
enable = true;
settings = {
server_name = "sealight.xyz";
listeners = [
{
port = 8448;
tls = false;
resources = [{
compress = true;
names = [ "client" "federation" ];
}];
}
{
port = 9090;
type = "metrics";
bind_addresses = [ "0.0.0.0" ];
resources = [{
compress = false;
names = [ ];
}];
tls = false;
}
];
app_service_config_files = [
# The registration file is automatically generated after starting the appservice for the first time.
# cp /var/lib/matrix-appservice-discord/discord-registration.yaml /var/lib/matrix-synapse/
# chown matrix-synapse:matrix-synapse /var/lib/matrix-synapse/discord-registration.yaml
# "/var/lib/matrix-synapse/telegram-registration.yaml"
# "/var/lib/matrix-synapse/slack-registration.yaml"
# "/var/lib/matrix-synapse/discord-registration.yaml"
# "/var/lib/matrix-synapse/whatsapp-registration.yaml"
];
turn_uris = [
"turn:turn.sealight.xyz:3478?transport=udp"
"turn:turn.sealight.xyz:3478?transport=tcp"
];
turn_shared_secret = config.services.coturn.static-auth-secret;
# NOTE(review): `extraConfig` nested inside the freeform `settings` set may
# be emitted as a literal `extraConfig:` YAML key rather than merged raw
# config — confirm against the module version in use.
extraConfig = ''
max_upload_size: "50M"
use_presence: false
registration_shared_secret: "hD9HQGTTDxp0mQsQ5JDsfudWMDiubmZENOgPchIvfBvUlPxlvQSvjoO4wn2L1seU";
'';
enable_metrics = true;
enable_registration = false;
database = {
name = "psycopg2";
args.password = "Da0?H*9i{x?,]|kq@iBwlIzu"; # TODO agenix
};
};
## coturn based TURN server integration (TURN server setup mentioned later),
## shared secret generated while configuring coturn
## and reused here (power of Nix being a real programming language)
};
services.coturn = {
enable = true;
use-auth-secret = true;
static-auth-secret = "jXW1ohIq6wM3NB00xeME3uBihY85xjpkhGoyzBIdwhOpj7gjyxXZu1fwp1lUiYwJ";
realm = "turn.sealight.xyz";
min-port = 49111;
max-port = 51111;
no-cli = true;
no-tcp-relay = true;
no-tls = true;
cert = "${config.security.acme.certs."turn.sealight.xyz".directory}/full.pem";
pkey = "${config.security.acme.certs."turn.sealight.xyz".directory}/key.pem";
# Deny relaying to private/special-use ranges so the TURN server cannot be
# abused to reach the internal network.
extraConfig = ''
verbose
user-quota=12
total-quota=1200
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=127.0.0.0-127.255.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=192.0.2.0-192.0.2.255
denied-peer-ip=192.88.99.0-192.88.99.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255
'';
};
security.acme.certs.${config.services.coturn.realm} = {
/* insert here the right configuration to obtain a certificate */
webroot = "/var/lib/acme/acme-challenge/";
email = "anish+acme@lakhwara.com";
postRun = "systemctl restart coturn.service";
group = "turnserver";
};
# TODO fix up jitsi bridge stuff
## services.jitsi-meet = {
##   enable = true;
##   hostName = "jitsi.sealight.xyz";
## };
## services.jitsi-videobridge.enable = true;
services.postgresql = {
enable = true;
## postgresql user and db name remains in the
## service.matrix-synapse.database_args setting which
## by default is matrix-synapse
# TODO agenix
# Fix: the role password must be a SQL *string literal* (single quotes).
# Double quotes denote an identifier in PostgreSQL, so the original
# `PASSWORD "…"` made CREATE ROLE fail on first start.
initialScript = pkgs.writeText "synapse-init.sql" ''
CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'Da0?H*9i{x?,]|kq@iBwlIzu';
CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
TEMPLATE template0
LC_COLLATE = "C"
LC_CTYPE = "C";
'';
authentication = ''
local matrix-synapse all ident map=matrix-synapse-users
'';
identMap = # Map the matrix-synapse user to postgresql
''
matrix-synapse-users matrix-synapse matrix-synapse
'';
};
networking.firewall =
let
# Open coturn's relay port range for both TCP and UDP.
range = with config.services.coturn; [{
from = min-port;
to = max-port;
}];
in
{
enable = true;
allowedUDPPortRanges = range; # coturn
allowedTCPPortRanges = range;
allowedTCPPorts = [
22 # SSH
8448 # Matrix federation
8008
80
443
3478 # Coturn service
5349 # Coturn service
9090 # Synapse Metrics
];
allowedUDPPorts = [
3478
5349 # Coturn service
];
};
# Point element-web at this homeserver via an overlay-configured build.
nixpkgs.overlays = [
(self: super: {
element-web = super.element-web.override {
conf = {
default_server_config = {
"m.homeserver" = {
"base_url" = "https://chat.sealight.xyz";
"server_name" = "sealight.xyz";
};
"m.identity_server" = {
"base_url" = "https://vector.im";
};
};
## jitsi will be setup later,
## but we need to add to Riot configuration
jitsi.preferredDomain = "jitsi.sealight.xyz";
};
};
})
];
services.nginx = {
enable = true;
virtualHosts = {
"sealight.xyz" = {
forceSSL = true;
enableACME = true;
locations."/" = {
root = "/var/www/sealight.xyz";
};
locations."/_matrix" = {
proxyPass = "http://localhost:8448";
};
# locations."/slackbridge" = {
#   proxyPass = "http://localhost:9899";
# };
};
## virtual host for Synapse
"chat.sealight.xyz" = {
forceSSL = true;
enableACME = true; # TODO
#useACMEHost = "sealight.xyz";
locations."/" = {
proxyPass = "http://localhost:8448";
};
};
## virtual host for Riot/Web
"element.sealight.xyz" = {
forceSSL = true;
enableACME = true; # TODO
#useACMEHost = "sealight.xyz";
## root points to the riot-web package content, also configured via Nix
locations."/" = {
root = pkgs.element-web;
};
};
# ${config.services.jitsi-meet.hostName} = {
#   enableACME = true; # TODO
#   forceSSL = true; # TODO
# };
};
## other nginx specific best practices
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedTlsSettings = true;
};
services.mautrix-telegram = {
enable = false;
environmentFile = /etc/secrets/telegram.env; # file containing the appservice and telegram tokens
# The appservice is pre-configured to use SQLite by default. It's also possible to use PostgreSQL.
settings = {
homeserver = {
address = "https://sealight.xyz";
domain = "sealight.xyz";
};
appservice = {
provisioning.enabled = false;
id = "telegram";
bot_username = "telegrambridge";
public = {
enabled = false;
prefix = "/public";
external = "https://chat.sealight.xyz/public";
};
address = "http://localhost:18787";
port = 18787;
# The service uses SQLite by default, but it's also possible to use PostgreSQL instead:
#database = "postgresql:///mautrix-telegram?host=/run/postgresql";
};
bridge = {
relaybot.authless_portals = false;
permissions = {
"@aynish:sealight.xyz" = "admin";
};
};
};
};
# services.heisenbridge = {
#   enable = true;
#   homeserver = "https://sealight.xyz";
#   listenPort = 14456;
#   appServiceToken = "wyujLh8kjpmk2bfKeEE3sZ2gWOEUBKK5";
#   homeserverToken = "yEHs7lthD2ZHUibJOAv1APaFhEjxN5PT";
# };
# environment.systemPackages = with pkgs; [ matrix-appservice-slack ];
#services.mx-puppet-slack= {
#enable = false;
#settings = {
#  bridge = {
#    bindAddress = "localhost";
#    port = 16786;
#    domain = "sealight.xyz";
#    homeserverUrl = "https://sealight.xyz";
#  };
#  provisioning = {
#    whitelist = [ "@aynish:sealight.xyz" ];
#  };
#  relay = {
#    whitelist = [ "@aynish:sealight.xyz" ];
#  };
#  namePatterns = {
#    group = ":name";
#    room = ":name[:team? - :team,]";
#    user = ":name (Slack)";
#    userOverride = ":displayname";
#  };
#  presence = {
#    enabled = false;
#  };
#  oauth = {
#    enabled = false;
#  };
#};
#serviceDependencies = ["matrix-synapse.service"];
#};
# services.mautrix-whatsapp = {
#   enable = false;
# The appservice is pre-configured to use SQLite by default. It's also possible to use PostgreSQL.
#configOptions = {
#  homeserver = {
#    address = "https://chat.sealight.xyz";
#    domain = "sealight.xyz";
#  };
#  appservice = {
#    id = "whatsapp";
#    address = http://localhost:9897;
#    hostname = "0.0.0.0";
#    port = 9897;
#    database = {
#      type = "sqlite3";
#      uri = "/var/lib/mautrix-whatsapp/mautrix-whatsapp.db";
#    };
#    state_store_path = "/var/lib/mautrix-whatsapp/mx-state.json";
#    bot = {
#      username = "whatsappbot";
#      displayname = "WhatsApp bridge bot";
#      avatar = "mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr";
#    };
#    as_token = "";
#    hs_token = "";
#  };
#  bridge = {
#    username_template = "whatsapp_{{.}}";
#    displayname_template = "{{if .Notify}}{{.Notify}}{{else}}{{.Jid}}{{end}} (WA)";
#    command_prefix = "!wa";
#    permissions = {
#      "@aynish:sealight.xyz" = 100;
#    };
#  };
#  logging = {
#    directory = "/var/lib/mautrix-whatsapp/logs";
#    file_name_format = "{{.Date}}-{{.Index}}.log";
#    file_date_format = "\"2006-01-02\"";
#    file_mode = 384;
#    timestamp_format = "Jan _2, 2006 15:04:05";
#    print_level = "debug";
#  };
#  metrics = {
#    enabled = true;
#    listen = "http://localhost:5070";
#  };
#};
# };
#services.mx-puppet-discord = {
#  enable = true;
#  settings = {
#    bridge = {
#      bindAddress = "localhost";
#      port = 16785;
#      domain = "sealight.xyz";
#      homeserverUrl = "https://sealight.xyz";
#      avatarUrl = "https://discord.com/assets/2d20a45d79110dc5bf947137e9d99b66.svg";
#    };
#    provisioning = {
#      whitelist = [ "@aynish:sealight.xyz" ];
#    };
#    relay = {
#      whitelist = [ "@aynish:sealight.xyz" ];
#    };
#    namePatterns = {
#      group = ":name";
#      room = ":name";
#      user = ":name (Discord)";
#      userOverride = ":displayname";
#    };
#    presence = {
#      enabled = false;
#    };
#  };
#  serviceDependencies = [ "matrix-synapse.service" ];
#};
}
+154
View File
@@ -0,0 +1,154 @@
# NixOS module defining services.matrix-appservice-slack: options for the
# bridge's JSON settings, secret env file, listen port, and a hardened
# systemd unit that generates the appservice registration on first start.
{ config, pkgs, lib, ... }:
with lib;
let
dataDir = "/var/lib/matrix-appservice-slack";
registrationFile = "${dataDir}/slack-registration.yaml";
#appDir = "${pkgs.matrix-appservice-slack}/${pkgs.matrix-appservice-slack.passthru.nodeAppDir}";
cfg = config.services.matrix-appservice-slack;
# TODO: switch to configGen.json once RFC42 is implemented
settingsFile = pkgs.writeText "matrix-appservice-slack-settings.json" (builtins.toJSON cfg.settings);
in
{
options = {
services.matrix-appservice-slack = {
enable = mkEnableOption "a bridge between Matrix and Slack";
settings = mkOption rec {
# TODO: switch to types.config.json as prescribed by RFC42 once it's implemented
type = types.attrs;
# User-supplied settings are merged on top of the defaults below.
apply = recursiveUpdate default;
default = {
# empty values necessary for registration file generation
# actual values defined in environmentFile
auth = {
clientID = "";
botToken = "";
};
};
example = literalExample ''
{
bridge = {
domain = "public-domain.tld";
homeserverUrl = "http://public-domain.tld:8008";
};
}
'';
description = ''
<filename>config.yaml</filename> configuration as a Nix attribute set.
</para>
<para>
Configuration options should match those described in
<link xlink:href="https://github.com/Half-Shot/matrix-appservice-discord/blob/master/config/config.sample.yaml">
config.sample.yaml</link>.
</para>
<para>
<option>config.bridge.domain</option> and <option>config.bridge.homeserverUrl</option>
should be set to match the public host name of the Matrix homeserver for webhooks and avatars to work.
</para>
<para>
Secret tokens should be specified using <option>environmentFile</option>
instead of this world-readable attribute set.
'';
};
environmentFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
File containing environment variables to be passed to the matrix-appservice-discord service,
in which secret tokens can be specified securely by defining values for
<literal>APPSERVICE_SLACK_AUTH_CLIENT_I_D</literal> and
<literal>APPSERVICE_SLACK_AUTH_BOT_TOKEN</literal>.
'';
};
url = mkOption {
type = types.str;
default = "http://localhost:${toString cfg.port}";
description = ''
The URL where the application service is listening for HS requests.
'';
};
port = mkOption {
type = types.port;
default = 9898; # from https://github.com/matrix-org/matrix-appservice-slack/blob/develop/config/config.sample.yaml#L70
description = ''
Port number on which the bridge should listen for internal communication with the Matrix homeserver.
'';
};
localpart = mkOption {
type = with types; nullOr str;
default = null;
description = ''
The user_id localpart to assign to the AS.
'';
};
serviceDependencies = mkOption {
type = with types; listOf str;
default = optional config.services.matrix-synapse.enable "matrix-synapse.service";
description = ''
List of Systemd services to require and wait for when starting the application service,
such as the Matrix homeserver if it's running on the same host.
'';
};
};
};
config = mkIf cfg.enable {
systemd.services.matrix-appservice-slack = {
description = "A bridge between Matrix and Slack.";
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
after = [ "network-online.target" ] ++ cfg.serviceDependencies;
# Generate the homeserver registration file once, on first start.
preStart = ''
if [ ! -f '${registrationFile}' ]; then
${pkgs.matrix-appservice-slack}/bin/matrix-appservice-slack \
--generate-registration \
--url=${escapeShellArg cfg.url} \
${optionalString (cfg.localpart != null) "--localpart=${escapeShellArg cfg.localpart}"} \
--config='${settingsFile}' \
--file='${registrationFile}'
fi
'';
serviceConfig = {
Type = "simple";
Restart = "always";
# Sandboxing: dynamic user, read-only system, private /tmp.
ProtectSystem = "strict";
ProtectHome = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
DynamicUser = true;
PrivateTmp = true;
#WorkingDirectory = appDir;
StateDirectory = baseNameOf dataDir;
# Nix reads 0027 as decimal 27; systemd re-parses "27" as octal, so the
# effective umask is still 0027.
UMask = 0027;
EnvironmentFile = cfg.environmentFile;
ExecStart = ''
${pkgs.matrix-appservice-slack}/bin/matrix-appservice-slack \
--file='${registrationFile}' \
--config='${settingsFile}' \
--port='${toString cfg.port}'
'';
};
};
};
meta.maintainers = with maintainers; [ chickensoupandrice ];
}
+97
View File
@@ -0,0 +1,97 @@
# NixOS module for mautrix-whatsapp: converts configOptions (an attrset) to
# the bridge's YAML config at build time, pre-generates the registration
# file in the same derivation, and runs the bridge as a systemd service.
{ lib, config, pkgs, ... }:
with lib;
let
cfg = config.services.mautrix-whatsapp;
# Build-time derivation producing both config.yaml and registration.yaml
# (json2yaml converts the Nix-supplied JSON; -g generates the registration).
configFile = pkgs.runCommand "mautrix-whatsapp"
{
buildInputs = [ pkgs.mautrix-whatsapp pkgs.remarshal ];
preferLocalBuild = true;
} ''
mkdir -p $out
${pkgs.remarshal}/bin/json2yaml -i ${pkgs.writeText "config.json" (builtins.toJSON cfg.configOptions)} \
-o $out/config.yaml
${pkgs.mautrix-whatsapp}/bin/mautrix-whatsapp -c $out/config.yaml -g -r $out/registration.yaml
'';
in
{
options.services.mautrix-whatsapp = {
enable = mkEnableOption "Mautrix-whatsapp, a puppeting bridge between Matrix and WhatsApp.";
configOptions = mkOption {
type = types.attrs;
description = ''
This options will be transform in YAML configuration file for the bridge
Look <link xlink:href="https://github.com/tulir/mautrix-whatsapp/wiki/Bridge-setup">here</link> for documentation.
'';
# NOTE(review): the bare `https://...;` values below are Nix URL
# literals (deprecated syntax) — quoting them would be more future-proof.
example = {
configOptions = {
homeserver = {
address = https://matrix.org;
domain = "matrix.org";
};
appservice = {
address = http://localhost:8080;
hostname = "0.0.0.0";
port = 8080;
database = {
type = "sqlite3";
uri = "/var/lib/mautrix-whatsapp/mautrix-whatsapp.db";
};
state_store_path = "/var/lib/mautrix-whatsapp/mx-state.json";
id = "whatsapp";
bot = {
username = "whatsappbot";
displayname = "WhatsApp bridge bot";
avatar = "mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr";
};
as_token = "";
hs_token = "";
};
bridge = {
username_template = "whatsapp_{{.}}";
displayname_template = "{{if .Notify}}{{.Notify}}{{else}}{{.Jid}}{{end}} (WA)";
command_prefix = "!wa";
permissions = {
"@example:matrix.org" = 100;
};
};
logging = {
directory = "/var/lib/mautrix-whatsapp/logs";
file_name_format = "{{.Date}}-{{.Index}}.log";
file_date_format = "\"2006-01-02\"";
file_mode = 384;
timestamp_format = "Jan _2, 2006 15:04:05";
print_level = "debug";
};
};
};
};
};
config = mkIf cfg.enable {
systemd.services.mautrix-whatsapp = {
description = "Mautrix-WhatsApp Service - A WhatsApp bridge for Matrix";
after = [ "network.target" "matrix-synapse.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
DynamicUser = true;
StateDirectory = "mautrix-whatsapp";
LoggingDir = "mautrix-whatsapp";
ExecStart = ''
${pkgs.mautrix-whatsapp}/bin/mautrix-whatsapp -c "${configFile}/config.yaml"
'';
Restart = "on-failure";
};
};
#services.matrix-synapse.app_service_config_files = [ "${configFile}/registration.yaml" ];
};
}
+125
View File
@@ -0,0 +1,125 @@
# NixOS module for the mx-puppet-slack bridge: JSON settings with sensible
# defaults, a locally-built package (pkg-mx-puppet-slack.nix), and a hardened
# systemd unit that generates the appservice registration on first start.
{ config, pkgs, lib, ... }:
with lib;
let
dataDir = "/var/lib/mx-puppet-slack";
registrationFile = "${dataDir}/slack-registration.yaml";
cfg = config.services.mx-puppet-slack;
settingsFormat = pkgs.formats.json { };
settingsFile = settingsFormat.generate "mx-puppet-slack-config.json" cfg.settings;
# Built from the sibling package expression rather than nixpkgs.
mx-puppet-slack = pkgs.callPackage ./pkg-mx-puppet-slack.nix { };
in
{
options = {
services.mx-puppet-slack = {
enable = mkEnableOption ''
mx-puppet-slack is a slack puppeting bridge for matrix.
It handles bridging private and group DMs, as well as Guilds (servers)
'';
settings = mkOption rec {
# User settings are deep-merged over the defaults below.
apply = recursiveUpdate default;
inherit (settingsFormat) type;
default = {
bridge.port = 8434;
presence = {
enabled = true;
interval = 500;
};
provisioning.whitelist = [ ];
relay.whitelist = [ ];
# variables are preceded by a colon.
namePatterns = {
user = ":name";
userOverride = ":displayname";
room = ":name";
group = ":name";
};
#defaults to sqlite but can be configured to use postgresql with
#connstring
database.filename = "${dataDir}/database.db";
logging = {
console = "info";
lineDateFormat = "MMM-D HH:mm:ss.SSS";
};
};
example = literalExpression ''
{
bridge = {
bindAddress = "localhost";
domain = "example.com";
homeserverUrl = "https://example.com";
};
provisioning.whitelist = [ "@admin:example.com" ];
relay.whitelist = [ "@.*:example.com" ];
}
'';
description = ''
<filename>config.yaml</filename> configuration as a Nix attribute set.
Configuration options should match those described in
<link xlink:href="https://github.com/matrix-slack/mx-puppet-slack/blob/master/sample.config.yaml">
sample.config.yaml</link>.
'';
};
serviceDependencies = mkOption {
type = with types; listOf str;
default = optional config.services.matrix-synapse.enable "matrix-synapse.service";
description = ''
List of Systemd services to require and wait for when starting the application service.
'';
};
};
};
config = mkIf cfg.enable {
systemd.services.mx-puppet-slack = {
description = ''
mx-puppet-slack is a slack puppeting bridge for matrix.
It handles bridging private and group DMs, as well as Guilds (servers).
'';
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
after = [ "network-online.target" ] ++ cfg.serviceDependencies;
preStart = ''
# generate the appservice's registration file if absent
if [ ! -f '${registrationFile}' ]; then
${mx-puppet-slack}/bin/mx-puppet-slack -r -c ${settingsFile} \
-f ${registrationFile}
fi
'';
serviceConfig = {
Type = "simple";
Restart = "always";
# Sandboxing: dynamic user, read-only system, private /tmp.
ProtectSystem = "strict";
ProtectHome = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
DynamicUser = true;
PrivateTmp = true;
WorkingDirectory = mx-puppet-slack;
StateDirectory = baseNameOf dataDir;
# Nix reads 0027 as decimal 27; systemd re-parses "27" as octal 0027.
UMask = 0027;
ExecStart = ''
${mx-puppet-slack}/bin/mx-puppet-slack \
-c ${settingsFile} \
-f ${registrationFile}
'';
};
};
};
meta.maintainers = with maintainers; [ chickensoupwithrice ];
}
+159
View File
@@ -0,0 +1,159 @@
# Monitoring config: Prometheus with node exporter, the upstream synapse
# recording rules (inline YAML), scrape jobs for node/synapse/gitea, and a
# (disabled) grafana/loki stack.
{ config, pkgs, ... }: {
# grafana configuration
#services.grafana = {
#  enable = true;
#  domain = "grafana.mossnet.lan";
#  port = 2342;
#  addr = "127.0.0.1";
#};
#
## nginx reverse proxy
#services.nginx.virtualHosts.${config.services.grafana.domain} = {
#  locations."/" = {
#    proxyPass = "http://127.0.0.1:${toString config.services.grafana.port}";
#    proxyWebsockets = true;
#  };
#};
services.prometheus = {
enable = true;
port = 9001;
exporters = {
node = {
enable = true;
enabledCollectors = [ "systemd" ];
port = 9002;
};
};
# Recording rules mirrored from matrix-org's synapse monitoring docs.
rules = [
''
groups:
- name: synapse
rules:
- record: "synapse_federation_transaction_queue_pendingEdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- record: "synapse_federation_transaction_queue_pendingPdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- record: 'synapse_http_server_request_count:method'
labels:
servlet: ""
expr: "sum(synapse_http_server_request_count) by (method)"
- record: 'synapse_http_server_request_count:servlet'
labels:
method: ""
expr: 'sum(synapse_http_server_request_count) by (servlet)'
- record: 'synapse_http_server_request_count:total'
labels:
servlet: ""
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
- record: 'synapse_cache:hit_ratio_5m'
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
- record: 'synapse_cache:hit_ratio_30s'
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
expr: 'synapse_federation_client_sent_edus + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
expr: 'sum(synapse_federation_client_sent_queries) by (job)'
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
expr: 'synapse_federation_server_received_edus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
expr: 'synapse_federation_server_received_pdus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
expr: 'sum(synapse_federation_server_received_queries) by (job)'
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "EDU"
expr: 'synapse_federation_transaction_queue_pending_edus + 0'
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep)
''
];
scrapeConfigs = [
{
job_name = "helix";
static_configs = [{
targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ];
}];
}
{
# Synapse's metrics listener (port 9090, defined in the matrix config).
job_name = "synapse";
metrics_path = "/_synapse/metrics";
static_configs = [{
targets = [ "localhost:9090" ];
}];
}
{
job_name = "gitea";
static_configs = [{
targets = [ "localhost:3001" ];
}];
}
];
};
networking.firewall = {
enable = true;
allowedTCPPorts = [ 9001 ];
};
# Expose Prometheus behind nginx.
# NOTE(review): basicAuth is commented out, so /metrics is public — confirm
# this is intended.
services.nginx.virtualHosts."sealight.xyz" = {
locations."/metrics" = {
#basicAuth = { anish = "password"; };
proxyPass = "http://localhost:9001";
};
};
services.loki = {
enable = false; # TODO
#configFile = /var/loki-config.yaml;
};
# systemd.services.promtail = {
#   description = "Promtail service for Loki";
#   wantedBy = [ "multi-user.target" ];
#   serviceConfig = {
#     ExecStart = ''
#       ${pkgs.grafana-loki}/bin/promtail --config.file ${/var/promtail.yaml}
#     '';
#   };
# };
}
+19
View File
@@ -0,0 +1,19 @@
{ pkgs, lib, ... }:
let
  # Same viewer pair is registered for every raster image type.
  imageViewers = [ "sxiv.desktop" "feh.desktop" ];
in
{
  # Default applications by MIME type (written to mimeapps.list).
  xdg.mime.defaultApplications = {
    "application/pdf" = "firefox.desktop";
    "image/png" = imageViewers;
    "image/jpg" = imageViewers;
    "image/jpeg" = imageViewers;
  };
}
+60
View File
@@ -0,0 +1,60 @@
{ config, pkgs, ... }: {
  # grafana configuration — loopback only; nginx fronts it below.
  services.grafana = {
    enable = true;
    domain = "stats.mossnet.lan";
    port = 2342;
    addr = "127.0.0.1";
  };
  # nginx reverse proxy
  services.nginx.recommendedProxySettings = true; # Needed for new grafana versions
  services.nginx.virtualHosts.${config.services.grafana.domain} = {
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString config.services.grafana.port}";
      proxyWebsockets = true;
    };
  };
  # Prometheus plus node/dnsmasq exporters, scraping itself over loopback.
  services.prometheus = {
    enable = true;
    port = 9001;
    exporters = {
      node = {
        enable = true;
        enabledCollectors = [ "systemd" ];
        port = 9002;
      };
      dnsmasq = {
        enable = true;
        port = 9153;
      };
    };
    scrapeConfigs = [
      {
        job_name = "box";
        static_configs = [{ targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ]; }];
      }
      {
        job_name = "dns";
        static_configs = [{ targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.dnsmasq.port}" ]; }];
      }
    ];
  };
  # NOTE(review): both attributes are commented out, so this is an empty
  # attrset — Loki is effectively disabled here.
  services.loki = {
    #enable = true;
    #configFile = /var/loki-config.yaml;
  };
  #systemd.services.promtail = {
  # description = "Promtail service for Loki";
  # wantedBy = [ "multi-user.target" ];
  # serviceConfig = {
  # ExecStart = ''
  # ${pkgs.grafana-loki}/bin/promtail --config.file ${/var/promtail.yaml}
  # '';
  # };
  #};
}
+14
View File
@@ -0,0 +1,14 @@
{
  # Static /etc/hosts entries: every mossnet service name resolves to the
  # NAS box on the LAN.
  networking.extraHosts = ''
    192.168.1.240 mossnet.lan
    192.168.1.240 links.mossnet.lan
    192.168.1.240 read.mossnet.lan
    192.168.1.240 music.mossnet.lan
    192.168.1.240 stats.mossnet.lan
    192.168.1.240 file.mossnet.lan
  '';
  # Alternative addressing over the 10.0.69.0/24 wireguard subnet, kept for
  # reference:
  # 10.0.69.4 mossnet.lan
  # 10.0.69.4 links.mossnet.lan
  # 10.0.69.4 read.mossnet.lan
  # 10.0.69.4 stats.mossnet.lan
}
+9
View File
@@ -0,0 +1,9 @@
{ pkgs, lib, ... }:
{
  # Mount the NAS's /mnt/two export (reachable via its wireguard address)
  # at /mnt/mossnet.
  fileSystems."/mnt/mossnet" = {
    device = "10.0.69.4:/mnt/two";
    fsType = "nfs";
    options = [ "noatime" ]; # avoid access-time write traffic over the network
  };
}
+62
View File
@@ -0,0 +1,62 @@
{ pkgs, lib, ... }:
{
  # Audio-production setup: JACK-compatible tooling on top of PipeWire.
  environment.systemPackages = with pkgs; [
    qjackctl
    libjack2 # needed for overtone with pipewire
    jack2 # needed for overtone with pipewire
    # GUI
    carla
    # Plugins
    helm
    # surge
    distrho
    orca-c
    supercollider
    dirt
    sunvox
    vcv-rack
    lmms
    bespokesynth
    lsp-plugins
    helio-workstation
    projectm # milkdrop visualizer
    # DAWs
    # ardour
    # reaper
    renoise
  ];
  # PipeWire replaces PulseAudio; mkForce wins over any module enabling it.
  hardware.pulseaudio.enable = lib.mkForce false;
  # rtkit lets the audio server acquire realtime scheduling.
  security.rtkit.enable = true;
  services.pipewire = {
    enable = true;
    alsa.enable = true;
    alsa.support32Bit = true;
    pulse.enable = true;
    jack.enable = true;
    config = {
      # Fixed 48 kHz clock; the same resample.quality is applied to the
      # pulse bridge and both native client profiles.
      pipewire."context.properties"."default.clock.rate" = "48000";
      pipewire-pulse."stream.properties"."resample.quality" = 15;
      client."stream.properties"."resample.quality" = 15;
      client-rt."stream.properties"."resample.quality" = 15;
      #jack."context.modules" = [];
    };
    #media-session.config.bluez-monitor.properties = {
    # "bluez5.headset-roles" = [ "hsp_hs" "hsp_ag" ];
    # "bluez5.codecs" = [ "aac" "ldac" "aptx_hd" ];
    #};
  };
  #systemd.services.bluetooth.serviceConfig.ExecStart = [
  # ""
  # "${config.hardware.bluetooth.package}/libexec/bluetooth/bluetoothd --noplugin=sap"
  #];
  boot.kernelModules = [ "snd-seq" "snd-rawmidi" ]; # midi sequence kernel mods
  # NOTE(review): pulseaudio is force-disabled above, so this override only
  # takes effect if PulseAudio is ever re-enabled — confirm it is still wanted.
  hardware.pulseaudio.package = pkgs.pulseaudio.override { jackaudioSupport = true; };
}
+60
View File
@@ -0,0 +1,60 @@
{ pkgs, config, ... }:
{
  #age.secrets.nextcloud-db.file = "/etc/nixos/secrets/nextcloud-db.age";
  #age.secrets.nextcloud-db.owner = "nextcloud";
  #age.secrets.nextcloud-admin.file = "/etc/nixos/secrets/nextcloud-admin.age";
  #age.secrets.nextcloud-admin.owner = "nextcloud";
  # Nextcloud on the LAN, backed by local PostgreSQL over its unix socket.
  services.nextcloud = {
    enable = true;
    hostName = "mossnet.lan";
    home = "/data/nextcloud2";
    package = pkgs.nextcloud22;
    # Use HTTPS for links
    https = false;
    # Auto-update Nextcloud Apps
    autoUpdateApps.enable = true;
    # Set what time makes sense for you
    autoUpdateApps.startAt = "05:00:00";
    config = {
      # Further forces Nextcloud to use HTTPS
      # overwriteProtocol = "https";
      # Nextcloud PostegreSQL database configuration, recommended over using SQLite
      dbtype = "pgsql";
      dbuser = "nextcloud2";
      dbhost = "/run/postgresql"; # nextcloud will add /.s.PGSQL.5432 by itself
      dbname = "nextcloud2";
      dbpassFile = "/var/nextcloud-db-pass";
      adminpassFile = "/var/nextcloud-admin-pass";
      adminuser = "admin";
    };
  };
  # Enable PostgreSQL
  services.postgresql = {
    enable = true;
    # Fix: provision the database and role Nextcloud is actually configured
    # to use ("nextcloud2" above). The previous "nextcloud" entries created a
    # database/role that nothing referenced, leaving nextcloud2 unprovisioned.
    # NOTE(review): dbhost is a unix socket — confirm pg_hba allows the
    # "nextcloud2" role from the nextcloud service's OS user (or that
    # dbpassFile-based auth is configured).
    ensureDatabases = [ "nextcloud2" ];
    ensureUsers = [
      {
        name = "nextcloud2";
        ensurePermissions."DATABASE nextcloud2" = "ALL PRIVILEGES";
      }
    ];
  };
  # Ensure that postgres is running before running the setup
  systemd.services."nextcloud-setup" = {
    requires = [ "postgresql.service" ];
    after = [ "postgresql.service" ];
  };
  networking.firewall.allowedTCPPorts = [ 80 443 ];
  #nixpkgs.config.permittedInsecurePackages = [ "nextcloud-19.0.6"];
}
+35
View File
@@ -0,0 +1,35 @@
{
  #fileSystems."/export/" = {
  # device = "/mnt/mafuyu";
  # options = [ "bind" ];
  #};
  # NFS server exporting the data drives to the LAN, and /mnt/two to the
  # wireguard subnet as well.
  services.nfs.server = {
    enable = true;
    # fixed rpc.statd port; for firewall
    lockdPort = 4001;
    mountdPort = 4002;
    statdPort = 4000;
    extraNfsdConfig = '''';
    exports = ''
      /home/ftp 192.168.1.0/24(rw)
      /mnt/one 192.168.1.0/24(rw)
      /mnt/two 192.168.1.0/24(rw,no_subtree_check) 10.0.69.0/24(rw,no_subtree_check)
      /mnt/three 192.168.1.0/24(rw)
    '';
  };
  # Open portmapper (111), NFS (2049), the pinned statd/lockd/mountd ports
  # above, and 20048, on both TCP and UDP.
  networking.firewall = {
    allowedTCPPorts = [ 111 2049 4000 4001 4002 20048 ];
    allowedUDPPorts = [ 111 2049 4000 4001 4002 20048 ];
  };
  #systemd.services.create-mount-dir = {
  # serviceConfig = {
  # ExecStart = "mkdir /export";
  # };
  # wantedBy = [ "multi-user.target" ];
  # after = [ "remote-fs.target" ];
  # description = "Create a directory we can mount on fs";
  #};
}
+24
View File
@@ -0,0 +1,24 @@
{ config, lib, pkgs, ... }:
{
  # Radicale CalDAV/CardDAV server, listening on all interfaces on 5252.
  services.radicale.enable = true;
  services.radicale.package = pkgs.radicale;
  services.radicale.settings.server.hosts = [ "0.0.0.0:5252" ];

  # Direct access to Radicale's own listener from the network.
  networking.firewall.allowedTCPPorts = [ 5252 ];

  # Plain-HTTP nginx front-end under the LAN name.
  services.nginx.virtualHosts."cal.mossnet.lan" = {
    enableACME = false;
    forceSSL = false;
    locations."/".extraConfig = ''
      proxy_pass http://localhost:5252/;
      proxy_set_header X-Forwarded-Host $host;
    '';
  };
}
+11
View File
@@ -0,0 +1,11 @@
{ pkgs, lib, config, ... }:
{
  # RSS-Bridge: generates RSS feeds for sites that don't offer them.
  services.rss-bridge = {
    enable = true;
    virtualHost = "bridge.sealight.xyz";
    # Only these bridges are enabled.
    whitelist = [ "Facebook" "Bandcamp" "Twitter" "Telegram" "Instagram" "Reddit" ];
  };
  # TLS for the vhost is currently left off (plain HTTP):
  #services.nginx.virtualHosts."bridge.sealight.xyz".forceSSL = true;
  #services.nginx.virtualHosts."bridge.sealight.xyz".enableACME = true;
}
+54
View File
@@ -0,0 +1,54 @@
{ pkgs, ... }:
{
  # Seafile file-sync server, fronted by nginx at file.mossnet.lan.
  services.seafile = {
    enable = true;
    seafileSettings = {
      fileserver.host = "0.0.0.0";
      fileserver.port = 8082;
    };
    ccnetSettings.General.SERVICE_URL = "http://file.mossnet.lan";
    adminEmail = "anish@lakhwara.com";
    # NOTE(review): initial admin password is committed in plain text —
    # rotate after first login or move to a secret.
    initialAdminPassword = "arandompasswordsecure!";
  };
  services.nginx = {
    enable = true;
  };
  # Seahub (web UI) behind its gunicorn unix socket.
  services.nginx.upstreams."gunicorn_seafile" = {
    servers."unix:/run/seahub/gunicorn.sock" = { };
  };
  # Seafile's own HTTP file server (matches fileserver.port above).
  services.nginx.upstreams."seafhttp" = {
    servers."127.0.0.1:8082" = { };
  };
  services.nginx.virtualHosts."file.mossnet.lan" = {
    serverName = "file.mossnet.lan";
    locations."/" = {
      proxyPass = "http://gunicorn_seafile"; # without a trailing /
      extraConfig = ''
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        #proxy_set_header Host $host;
        proxy_read_timeout 1200s;
        client_max_body_size 0; # disable max upload size
      '';
    };
    # Upload/download endpoint; the rewrite strips the /seafhttp prefix.
    locations."/seafhttp" = {
      proxyPass = "http://seafhttp"; # without a trailing /
      extraConfig = ''
        rewrite ^/seafhttp(.*)$ $1 break;
        client_max_body_size 0;
        proxy_connect_timeout 36000s;
        proxy_read_timeout 36000s;
        proxy_send_timeout 36000s;
        proxy_request_buffering off;
        send_timeout 36000s;
      '';
    };
  };
}
+14
View File
@@ -0,0 +1,14 @@
{
  # Placeholder module: the sealight.xyz static-site vhost is entirely
  # commented out and currently contributes nothing.
  # services.nginx = {
  # enable = true;
  # virtualHosts = {
  # "sealight.xyz" = {
  # forceSSL = true;
  # enableACME = true;
  # locations."/" = {
  # root = "/var/www/sealight.xyz";
  # };
  # };
  # };
  # };
}
+9
View File
@@ -0,0 +1,9 @@
{ self, pkgs, config, lib, ... }:
{
  # Secrets used by home-manager modules
  # agenix-encrypted credentials from the flake's secrets/ directory,
  # decrypted at activation and readable by anish.
  age.secrets.fastmail.file = "${self}/secrets/fastmail.age";
  age.secrets.fastmail.owner = "anish";
  age.secrets.mossnet.file = "${self}/secrets/mossnet.age";
  age.secrets.mossnet.owner = "anish";
}
+10
View File
@@ -0,0 +1,10 @@
{ config, ... }:
{
  # Hardened OpenSSH: key-only authentication, no direct root login.
  services.openssh = {
    enable = true;
    passwordAuthentication = false;
    permitRootLogin = "no";
  };
  # Fix: 443 (HTTPS) — previously "433", a typo that left HTTPS blocked
  # while the comment promised "ssh and website".
  networking.firewall.allowedTCPPorts = [ 80 443 22 ]; # ssh and website
  security.sudo.wheelNeedsPassword = false; # needed for deploy-rs
}
+114
View File
@@ -0,0 +1,114 @@
{ pkgs, config, ... }:
let
  # Shaarli's writable state lives outside the Nix store.
  dataDir = "/var/www/shaarli-config";
  name = "shaarli";
in
{
  # nginx vhost serving Shaarli over plain HTTP on the LAN.
  services.nginx = {
    enable = true;
    virtualHosts = {
      "links.mossnet.lan" = {
        forceSSL = false;
        enableACME = false;
        root = "${dataDir}";
        extraConfig = ''
          index index.php;
        '';
        locations = {
          # Front controller: route everything through index.php.
          "/".extraConfig = ''
            index index.php;
            try_files _ /index.php$is_args$args;
          '';
          # Only index.php is executed, via the dedicated PHP-FPM pool.
          "~ (index)\\.php$".extraConfig = ''
            try_files $uri =404;
            fastcgi_split_path_info ^(.+\.php)(/.+)$;
            fastcgi_pass unix:${config.services.phpfpm.pools."${name}".socket};
            fastcgi_index index.php;
            fastcgi_param QUERY_STRING $query_string;
            fastcgi_param REQUEST_METHOD $request_method;
            fastcgi_param CONTENT_TYPE $content_type;
            fastcgi_param CONTENT_LENGTH $content_length;
            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
            fastcgi_param SCRIPT_NAME $fastcgi_script_name;
            fastcgi_param PATH_INFO $fastcgi_path_info;
            fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info;
            fastcgi_param REQUEST_URI $request_uri;
            fastcgi_param DOCUMENT_URI $document_uri;
            fastcgi_param DOCUMENT_ROOT $document_root;
            fastcgi_param SERVER_PROTOCOL $server_protocol;
            fastcgi_param GATEWAY_INTERFACE CGI/1.1;
            fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
            fastcgi_param REMOTE_ADDR $remote_addr;
            fastcgi_param REMOTE_PORT $remote_port;
            fastcgi_param SERVER_ADDR $server_addr;
            fastcgi_param SERVER_PORT $server_port;
            fastcgi_param SERVER_NAME $server_name;
          '';
          # Any other .php file is refused outright.
          "~ \\.php$".extraConfig = ''
            deny all;
          '';
          # Static template/plugin assets served straight from the store path.
          "^~ /tpl/".extraConfig = "alias ${pkgs.shaarli}/tpl/;";
          "^~ /plugins/".extraConfig = "alias ${pkgs.shaarli}/plugins/;";
          "~* \\.(?:ico|css|js|gif|jpe?g|png)$".extraConfig = ''
            expires max;
            add_header Pragma public;
            add_header Cache-Control "public, must-revalidate, proxy-revalidate";
          '';
        };
      };
    };
  };
  # PHP-FPM pool for Shaarli, running as the nginx user.
  services.phpfpm = {
    pools = {
      "${name}" = {
        user = "nginx";
        group = "nginx";
        #listen = "/run/phpfpm/php.sock";
        settings = {
          "listen.owner" = "nginx";
          "listen.group" = "nginx";
          "user" = "nginx";
          "pm" = "dynamic";
          "pm.max_children" = "4";
          "pm.min_spare_servers" = "1";
          "pm.max_spare_servers" = "4";
          "pm.max_requests" = "32";
          "catch_workers_output" = "1";
          "php_admin_value[error_log]" = "/var/log/nginx/${name}-phpfpm-error.log";
        };
      };
    };
  };
  # Dedicated shaarli user/group (the pool itself runs as nginx, above —
  # NOTE(review): confirm the shaarli account is still needed).
  users.extraUsers."${name}" = { group = "${name}"; };
  users.extraGroups."${name}" = { };
  users.users.shaarli.isSystemUser = true;
  # First boot: link the store copy into dataDir; every start re-asserts
  # nginx ownership and 0700/0600 permissions on the writable tree.
  systemd.services.shaarli-install = {
    serviceConfig.Type = "oneshot";
    wantedBy = [ "multi-user.target" ];
    script = ''
      if [ ! -d "${dataDir}" ]; then
        #cp -R ${pkgs.shaarli}/data.orig/.htaccess ${dataDir}/cache/
        #cp -R ${pkgs.shaarli}/data.orig/.htaccess ${dataDir}/data/
        #cp -R ${pkgs.shaarli}/data.orig/.htaccess ${dataDir}/pagecache/
        #cp -R ${pkgs.shaarli}/data.orig/.htaccess ${dataDir}/tmp/
        mkdir -p ${dataDir}/{cache,data,pagecache,tmp}
        ln -s ${pkgs.shaarli}/* ${dataDir}
      fi
      chown -Rc nginx:nginx ${dataDir}
      find ${dataDir} -type d ! -perm 0700 -exec chmod 0700 {} \; -exec chmod g-s {} \;
      find ${dataDir} -type f ! -perm 0600 -exec chmod 0600 {} \;
    '';
  };
  #networking.firewall.allowedTCPPorts = [ 80 ];
}
+6
View File
@@ -0,0 +1,6 @@
{ self, pkgs, ... }:
{
  # NOTE(review): "services.anish.lakhwara.com" is an unusual option path —
  # confirm a custom module in this flake actually declares it.
  services."anish.lakhwara.com".enable = true;
  # Let's Encrypt account settings for ACME certificates.
  security.acme.email = "anish+acme@lakhwara.com";
  security.acme.acceptTerms = true;
}
+21
View File
@@ -0,0 +1,21 @@
{ pkgs, ... }:
{
  # One-shot unit running `vdirsyncer sync` as anish.
  systemd.services.vdir-sync = {
    path = [ pkgs.vdirsyncer ];
    script = ''
      vdirsyncer sync
    '';
    serviceConfig = {
      Type = "oneshot";
      User = "anish";
    };
  };
  # Fire the same-named service every hour.
  systemd.timers.vdir-sync = {
    wantedBy = [ "timers.target" ];
    partOf = [ "vdir-sync.service" ];
    timerConfig.OnCalendar = [ "hourly" ];
  };
}
+26
View File
@@ -0,0 +1,26 @@
{ pkgs, ... }:
{
  # Hourly auto-commit and push of the kitaab wiki repo, run as anish.
  systemd.services.kitaab-sync = {
    path = [
      pkgs.git
      pkgs.coreutils
    ];
    script = ''
      cd /home/anish/kitaab
      git add -A
      git diff-index --quiet HEAD || git commit -m 'syncing kitaab' # if nothing, don't exit 1
      git push
      exit 0
    '';
    serviceConfig = {
      Type = "oneshot";
      User = "anish";
    };
  };
  # Trigger the same-named service every hour.
  systemd.timers.kitaab-sync = {
    wantedBy = [ "timers.target" ];
    partOf = [ "kitaab-sync.service" ];
    timerConfig.OnCalendar = [ "hourly" ];
  };
}
+20
View File
@@ -0,0 +1,20 @@
{ pkgs, lib, ... }:
{
  # One-shot unit that pulls music from the seedbox (see ./get-music.sh),
  # run as anish.
  systemd.services.get-music-sync = {
    serviceConfig = {
      Type = "oneshot";
      User = "anish";
    };
    path = [
      pkgs.coreutils
      pkgs.rsync
    ];
    script = builtins.readFile ./get-music.sh;
  };
  # Fix: the timer must carry the service's name — a systemd timer starts
  # the unit matching its own name by default, so the old
  # "get-music-timer" tried to start the nonexistent
  # get-music-timer.service and the sync never ran on schedule.
  systemd.timers.get-music-sync = {
    wantedBy = [ "timers.target" ];
    partOf = [ "get-music-sync.service" ];
    timerConfig.OnCalendar = [ "daily" ];
  };
}
+9
View File
@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Pull completed downloads from the seedbox into /data/incoming.
# --ignore-existing: never re-transfer or clobber files already present;
# transfers are appended to the log file for later review.
rsync -r --ignore-existing --log-file=/data/incoming/download-log hypercube@talos.feralhosting.com:private/transmission/data/* /data/incoming
# you need to set defaults for beets
# if already imported -> Skip
# Auto accept changes
# also install it lmao
#beet import /data/incoming
+35
View File
@@ -0,0 +1,35 @@
{ pkgs, inputs, ... }:
{
  # Daily job: refresh the website's flake inputs and redeploy the "cube"
  # host with deploy-rs, run as anish.
  systemd.services.website-deploy = {
    serviceConfig = {
      Type = "oneshot";
      User = "anish";
    };
    path = [
      pkgs.git
      pkgs.coreutils
      pkgs.nixUnstable
      pkgs.openssh
      inputs.deploy.packages.${pkgs.system}.deploy-rs
    ];
    script = ''
      cd /etc/nixos/ # TODO make variable
      nix flake lock --update-input poonam
      nix flake lock --update-input basant
      # TODO
      # git add flake.lock
      # git commit -m "update website"
      # git push
      deploy .#cube --hostname lakhwara.com
      exit 0
    '';
  };
  systemd.timers.website-deploy = {
    after = [ "kitaab-sync.timer" ];
    wantedBy = [ "timers.target" ];
    partOf = [ "website-deploy.service" ];
    timerConfig = {
      OnCalendar = [ "daily" ];
      # Fix: systemd's key is capital-P "Persistent". The previous
      # lowercase "persistent" was an unknown key that systemd ignored,
      # so runs missed while the machine was off were never caught up.
      Persistent = true;
    };
  };
}
+14
View File
@@ -0,0 +1,14 @@
{ pkgs, ... }:
{
  # Syncthing running as anish, with state under the home directory.
  services.syncthing = {
    enable = true;
    user = "anish";
    group = "users";
    dataDir = "/home/anish/";
    configDir = "/home/anish/.config/syncthing";
  };
  # 22000 = sync transfers, 21027/udp = local discovery, 8384 = web GUI.
  # NOTE(review): opening 8384 exposes the GUI beyond localhost — confirm
  # that is intended.
  networking.firewall.allowedTCPPorts = [ 8384 22000 ];
  networking.firewall.allowedUDPPorts = [ 22000 21027 ];
}
+9
View File
@@ -0,0 +1,9 @@
{ config, pkgs, ... }:
{
  # Taskwarrior sync server for the "mossnet" organisation.
  services.taskserver = {
    enable = true;
    fqdn = "task.moss";
    listenHost = "0.0.0.0"; # reachable from the whole network
    listenPort = 53589;
    organisations.mossnet.users = [ "anish" ];
  };
}
+100
View File
@@ -0,0 +1,100 @@
{ pkgs, config, lib, ... }:
{
  # Local PostgreSQL backing wallabag.
  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_11;
    # Ensure the database, user, and permissions always exist
    ensureDatabases = [ "wallabag" ];
    ensureUsers = [
      {
        name = "wallabag";
        ensurePermissions."DATABASE wallabag" = "ALL PRIVILEGES";
      }
    ];
  };
  # Wallabag read-it-later service at read.mossnet.lan.
  # NOTE(review): the database password and the app `secret` inside the conf
  # below are committed in plain text — consider moving them to secret files.
  services.wallabag = {
    enable = true;
    hostName = "read.mossnet.lan";
    package = pkgs.wallabag;
    conf = ''
      # This file is a "template" of what your parameters.yml file should look like
      parameters:
          # Uncomment these settings or manually update your parameters.yml
          # to use docker-compose
          #
          # database_driver: %env.database_driver%
          # database_host: %env.database_host%
          # database_port: %env.database_port%
          # database_name: %env.database_name%
          # database_user: %env.database_user%
          # database_password: %env.database_password%
          database_driver: pdo_pgsql
          database_host: localhost
          database_port: ~
          database_name: wallabag
          database_user: wallabag
          database_password: wallabag
          # For SQLite, database_path should be "%kernel.project_dir%/data/db/wallabag.sqlite"
          database_path: ~
          database_table_prefix: wallabag_
          database_socket: null
          # with PostgreSQL and SQLite, you must set "utf8"
          database_charset: utf8
          domain_name: http://read.mossnet.lan
          server_name: "mossnet wallabag instance"
          mailer_transport: smtp
          mailer_user: ~
          mailer_password: ~
          mailer_host: 127.0.0.1
          mailer_port: false
          mailer_encryption: ~
          mailer_auth_mode: ~
          locale: en
          # A secret key that's used to generate certain security-related tokens
          secret: SAFGOECRIlfal89oe6u0(*^dsaaih961
          # two factor stuff
          twofactor_auth: true
          twofactor_sender: no-reply@wallabag.org
          # fosuser stuff
          fosuser_registration: true
          fosuser_confirmation: true
          # how long the access token should live in seconds for the API
          fos_oauth_server_access_token_lifetime: 3600
          # how long the refresh token should life in seconds for the API
          fos_oauth_server_refresh_token_lifetime: 1209600
          from_email: no-reply@wallabag.org
          rss_limit: 50
          # RabbitMQ processing
          rabbitmq_host: localhost
          rabbitmq_port: 5672
          rabbitmq_user: guest
          rabbitmq_password: guest
          rabbitmq_prefetch_count: 10
          # Redis processing
          redis_scheme: tcp
          redis_host: localhost
          redis_port: 6379
          redis_path: null
          redis_password: null
          # sentry logging
          sentry_dsn: ~
    '';
  };
  # networking.firewall.allowedTCPPorts = [ 8080 ];
}
+42
View File
@@ -0,0 +1,42 @@
{ config, pkgs, ... }:
{
  # Don't block boot waiting on DHCP; "noarp" is passed through to dhcpcd.conf.
  networking.dhcpcd.wait = "background";
  networking.dhcpcd.extraConfig = "noarp";
  # Allow wifi to be managed by ordinary users via wpa_supplicant's control
  # interface.
  networking.wireless.userControlled.enable = true;
  # Known networks keyed by SSID.
  # NOTE(review): pskRaw values are derived keys rather than passphrases, but
  # they still grant network access and end up world-readable in the Nix store.
  networking.wireless.networks = {
    "Chester 11403" = {
      pskRaw = "43fcb0bea43633899a9885865c53ea79d268b9bdfb8c3f4013718e43e6672e5e";
    };
    "HSBNEWiFi" = {
      pskRaw = "0820d84980eeb47630f13f04804fc9add684a8feebb64ba914cafc48f569d801";
    };
    "TP-LINK_1D79" = {
      pskRaw = "6d960910c33a59e94b151281cc3982863b4b112d8a4efd1b165e4f8e52e7dae8";
    };
    "nadir" = {
      pskRaw = "92e0c1f0b3a1bada333964f0ee4ac04e0d9ba941aa3f29216cc3782595a41e5f";
    };
    "WiFi-399631" = {
      pskRaw = "2e312c9721e470847f751d8b844d264dc5e2612361a579fc0bdebd2135b5a8ea";
    };
    "line" = {
      pskRaw = "ec36a5a116224c12e305c90b7f3c5a0b7417abfaacf15828748f6a1e1e316c03";
    };
    "MEHRA" = {
      pskRaw = "e1573bc9e30fd68ac8a805b7c5f9871322ffcdac909746831555905c2e156abb";
    };
    "Sandeep1" = {
      pskRaw = "035972401640bf4dc2d8651c0b9df515a69f6bc2bffa07cc936183128a6ccf0a";
    };
    "chadpad" = {
      pskRaw = "2594b3880a60817950bd60b2770c9793fd4cb784ed183f685a52502a9df2c7b1";
    };
    # "updog" = {
    # pskRaw = "be28d9e50e4d9a065f1afccad52675d0d4fabe85526c0245648671721db606b1";
    # };
    "Vodafone-07F38" = {
      pskRaw = "4ffd87de73ed31ef1aee1d0d178857490afbda3f0bb8453a0baaee7b2576f302";
    };
  };
}
+61
View File
@@ -0,0 +1,61 @@
{ config, pkgs, lib, ... }:
{
  # VPN setup for this host: tailscale alongside a hand-rolled WireGuard
  # tunnel as 10.0.69.4 into the 10.0.69.0/24 overlay.
  services.tailscale.enable = true;
  networking.firewall.allowedUDPPorts = [ 60990 ];
  # Enable WireGuard
  networking.wireguard.interfaces = {
    # "wg0" is the network interface name. You can name the interface arbitrarily.
    wg0 = {
      # Determines the IP address and subnet of the client's end of the tunnel interface.
      ips = [ "10.0.69.4/24" ];
      listenPort = 60990; # to match firewall allowedUDPPorts (without this wg uses random port numbers)
      # Path to the private key file.
      #
      # Note: The private key can also be included inline via the privateKey option,
      # but this makes the private key world-readable; thus, using privateKeyFile is
      # recommended.
      privateKeyFile = "/home/anish/wg/privkey";
      peers = [
        # For a client configuration, one peer entry for the server will suffice.
        {
          # Public key of the server (not a file path).
          publicKey = "c1J4p63rD3IlszugMZiki7UBV3YmDdqa3DU4UejXzAI=";
          # Forward all the traffic via VPN.
          allowedIPs = [ "10.0.69.0/24" ];
          # Or forward only particular subnets
          #allowedIPs = [ "10.100.0.1" "91.108.12.0/22" ];
          # Set this to the server IP and port.
          endpoint = "69.61.38.225:60990"; # ToDo: route to endpoint not automatically configured https://wiki.archlinux.org/index.php/WireGuard#Loop_routing https://discourse.nixos.org/t/solved-minimal-firewall-setup-for-wireguard-client/7577
          # Send keepalives every 25 seconds. Important to keep NAT tables alive.
          persistentKeepalive = 25;
        }
      ];
    };
  };
  # Older manual wireguard attempt, kept for reference:
  # boot.extraModulePackages = with config.boot.kernelPackages; [ wireguard ];
  # environment.systemPackages = [ pkgs.wireguard ];
  # networking.wireguard.enable = true;
  # networking.wireguard.interfaces = {
  # wg0 = {
  # ips = [ "10.0.69.4/32" ];
  # privateKeyFile = "/home/anish/wg/privkey";
  # listenPort = 60990;
  #
  # peers = [
  # { # helix - server
  # publicKey = "{c1J4p63rD3IlszugMZiki7UBV3YmDdqa3DU4UejXzAI=}";
  # allowedIPs = [ "10.0.69.1/24" ];
  # endpoint = "sealight.xyz:60990";
  # persistentKeepalive = 25;
  # } ];
  # };
  # };
}
+41
View File
@@ -0,0 +1,41 @@
{ config, pkgs, lib, ... }:
{
  # WireGuard hub (10.0.69.1) for the 10.0.69.0/24 overlay; NATs peer
  # traffic out through the public interface.
  environment.systemPackages = [ pkgs.wireguard-tools pkgs.mosh ];
  networking.nat = {
    enable = true;
    externalInterface = "ens3";
    internalInterfaces = [ "wg0" ];
  };
  networking.firewall = {
    allowedUDPPorts = [ 60990 ];
  };
  networking.wireguard.interfaces.wg0 = {
    ips = [ "10.0.69.1/24" ];
    listenPort = 60990;
    privateKeyFile = "/var/lib/wireguard/private";
    generatePrivateKeyFile = true; # TODO agenix secret
    # One /32 per client host.
    peers = [
      {
        # box
        publicKey = "Ra78mOc110K7URN5uB3m9d78iBKgeRHzT+3HkiFp9BU=";
        allowedIPs = [ "10.0.69.4/32" ];
      }
      {
        # line
        publicKey = "fsb1FuGmYi89p4CVXytKw2FYamONKtoanxrgs/dJjC8=";
        allowedIPs = [ "10.0.69.3/32" ];
      }
      {
        # curve
        publicKey = "XmV4AcPPND/eeSI6d9M1iahCjtymqY6DdUDEFNP/x3g=";
        allowedIPs = [ "10.0.69.2/32" ];
      }
      {
        # helix
        publicKey = "gcdq86hhEUlqF2chqYB/F8pALyAMNFvwLycxBoHuoDs=";
        allowedIPs = [ "10.0.69.5/32" ];
      }
    ];
  };
}
+30
View File
@@ -0,0 +1,30 @@
{ config, pkgs, lib, ... }:
{
  # WireGuard client config for curve (10.0.69.2); private key comes from
  # an agenix secret decrypted to /run/agenix.
  # services.tailscale.enable = true;
  networking.firewall.allowedUDPPorts = [ 60990 ];
  age.secrets.curve-wg.file = "/etc/nixos/secrets/curve-wg.age";
  age.secrets.curve-wg.owner = "anish";
  networking.wireguard.interfaces = {
    # TODO need some kind of module here to configure curve & box from hosts or something
    wg0 = {
      ips = [ "10.0.69.2/24" ];
      listenPort = 60990; # to match firewall allowedUDPPorts (without this wg uses random port numbers)
      privateKeyFile = "/run/agenix/curve-wg";
      peers = [
        # For a client configuration, one peer entry for the server will suffice.
        {
          publicKey = "c1J4p63rD3IlszugMZiki7UBV3YmDdqa3DU4UejXzAI=";
          allowedIPs = [ "10.0.69.0/24" ];
          # Set this to the server IP and port.
          endpoint = "69.61.38.225:60990"; # ToDo: route to endpoint not automatically configured https://wiki.archlinux.org/index.php/WireGuard#Loop_routing https://discourse.nixos.org/t/solved-minimal-firewall-setup-for-wireguard-client/7577
          # Send keepalives every 25 seconds. Important to keep NAT tables alive.
          persistentKeepalive = 25;
        }
      ];
    };
  };
}