why the fuck is this not working?
Some checks failed
nix fmt (push): cancelled

This commit is contained in:
Ittihadyya 2024-11-26 13:52:46 +02:00
parent 9d2c1aa4df
commit ec9143f3ad
7 changed files with 126 additions and 107 deletions

@@ -6,4 +6,4 @@ jobs:
name: "nix fmt"
steps:
- uses: actions/checkout@v4
- run: nixfmt $(find ./ -regex '.*.nix$') -- --check
- run: nix fmt $(find ./ -regex '.*.nix$') -- --check
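For context on the command change above: `nix fmt` has no formatter of its own, it runs whatever the flake exposes as its `formatter` output, so the check can only work once such an output is defined. A minimal sketch of that output, assuming an x86_64-linux flake formatted with nixfmt-rfc-style (nothing below is taken from this repo):

{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; # placeholder input
  outputs =
    { nixpkgs, ... }:
    {
      # `nix fmt` (and therefore the CI step above) resolves this output
      # and runs it over the files passed on the command line.
      formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-rfc-style;
    };
}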

@@ -1,43 +1,44 @@
{
cluster-testing.modules = [
({
/* services.k3s = { # just hogging resources at this moment
enable = true;
role = "server";
# token = ""; # Agent nodes are joined to the master node using a node-token which can be found on the master node at /var/lib/rancher/k3s/server/node-token.
clusterInit = true;
# allegedly you need different configs for non-starting nodes, including the ip of a server. you should handle this within nix, preferably -e
# allegedly: " If you are configuring an HA cluster with an embedded etcd, the 1st server must have clusterInit = true and other servers must connect to it using serverAddr. " # I think you can get around this kinda by pointing to a domain, so that if the server with the address specified in the config fails, others take the request. i am not sure about the details of the implementation - i.e how to do it without giving authority to a specific node. This is more of a theoretical problem, i think, since this only matters when a node starts up and gets to be part of the cluster - after it's included i'm pretty sure it would be fine? Might need to do some testing -e
# this kinda makes sense? like otherwise how would the new clusters know where to connect to ? Because it uses raft, the serverAddr doesn't necessarily have to be the one with clusterInit, as, according to the Raft specification, calls to followers get forwarded to the leader node. -e
extraFlags = [
# "--flannel-backend none"
# "--disable-network-policy"
# "--no-deploy traefik"
]; # --flannel-backend none and --disable-network-policy prepare the cluster for cilium, which, as far as i can see, i need to install imperatively because it isn't a service or packaged within nixpkgs. The command used is `cilium install --version 1.x.x --set=ipam.operator.clusterPoolIPv4PodCIDRList="10.42.0.0/16"`, replace the x's with whatever version you need; as of 2024.09.20, 1.16.1 is the latest (released on the 14th of August 2024, according to their github). Godspeed to future addy if we decide to package it ourselves or something. -e
# configPath = ./k3s.yaml;
}; # decided to try stock kubernetes since k3s doesn't seem to be working as i intend --- a week later --- YOU BUMBLING MORON YOU ARE ON UNSTABLE AND YOU WERE LOOKING AT 24.05 DOCS
/*
services.kubernetes = {
# flannel.enable = false;
roles = [ "master" "node" ];
masterAddress = "10.12.96.4";
#apiserverAddress = "10.12.96.4:6443";
kubelet = {
enable = true;
extraOpts = "--fail-swap-on=false";
};
scheduler.enable = true;
apiserver ={
enable = true;
advertiseAddress = "10.12.96.4";
securePort = 6443;
};
easyCerts = true;
pki.enable = true;
addons.dns.enable = true;
controllerManager.enable = true;
addonManager.enable = true;
}; #chat is this factual
services.k3s = { # just hogging resources at this moment
enable = true;
role = "server";
# token = ""; # Agent nodes are joined to the master node using a node-token which can be found on the master node at /var/lib/rancher/k3s/server/node-token.
clusterInit = true;
# allegedly you need different configs for non-starting nodes, including the ip of a server. you should handle this within nix, preferably -e
# allegedly: " If you are configuring an HA cluster with an embedded etcd, the 1st server must have clusterInit = true and other servers must connect to it using serverAddr. " # I think you can get around this kinda by pointing to a domain, so that if the server with the address specified in the config fails, others take the request. i am not sure about the details of the implementation - i.e how to do it without giving authority to a specific node. This is more of a theoretical problem, i think, since this only matters when a node starts up and gets to be part of the cluster - after it's included i'm pretty sure it would be fine? Might need to do some testing -e
# this kinda makes sense? like otherwise how would the new clusters know where to connect to ? Because it uses raft, the serverAddr doesn't necessarily have to be the one with clusterInit, as, according to the Raft specification, calls to followers get forwarded to the leader node. -e
extraFlags = [
# "--flannel-backend none"
# "--disable-network-policy"
# "--no-deploy traefik"
]; # --flannel-backend none and --disable-network-policy prepare the cluster for cilium, which, as far as i can see, i need to install imperatively because it isn't a service or packaged within nixpkgs. The command used is `cilium install --version 1.x.x --set=ipam.operator.clusterPoolIPv4PodCIDRList="10.42.0.0/16"`, replace the x's with whatever version you need; as of 2024.09.20, 1.16.1 is the latest (released on the 14th of August 2024, according to their github). Godspeed to future addy if we decide to package it ourselves or something. -e
# configPath = ./k3s.yaml;
}; # decided to try stock kubernetes since k3s doesn't seem to be working as i intend --- a week later --- YOU BUMBLING MORON YOU ARE ON UNSTABLE AND YOU WERE LOOKING AT 24.05 DOCS
/*
services.kubernetes = {
# flannel.enable = false;
roles = [ "master" "node" ];
masterAddress = "10.12.96.4";
#apiserverAddress = "10.12.96.4:6443";
kubelet = {
enable = true;
extraOpts = "--fail-swap-on=false";
};
scheduler.enable = true;
apiserver ={
enable = true;
advertiseAddress = "10.12.96.4";
securePort = 6443;
};
easyCerts = true;
pki.enable = true;
addons.dns.enable = true;
controllerManager.enable = true;
addonManager.enable = true;
}; #chat is this factual
*/
})
];
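On the HA notes in the k3s comments above (clusterInit = true on the first server, serverAddr plus the node-token on every later one): a minimal sketch of what such a joining server might look like, assuming the first node keeps clusterInit = true; the address and secret path are placeholders, not values from this config:

({
  services.k3s = {
    enable = true;
    role = "server";
    # Joins the embedded-etcd cluster started by the clusterInit node; pointing this
    # at a DNS name that fronts several servers avoids hard-coding a single node.
    serverAddr = "https://k3s.example.internal:6443"; # placeholder address
    # Contents of /var/lib/rancher/k3s/server/node-token on the first server,
    # delivered out of band (e.g. via sops) rather than written into the store.
    tokenFile = "/run/secrets/k3s_token"; # placeholder path
    extraFlags = [
      "--flannel-backend=none" # hand pod networking to Cilium, per the comment above
      "--disable-network-policy"
    ];
  };
})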

@@ -220,7 +220,10 @@ in
];
fructose.modules = [ garbage-collection-module ];
menthol.modules = [ distributed-build-module ];
aspartame.modules = [ distributed-build-module garbage-collection-module ];
aspartame.modules = [
distributed-build-module
garbage-collection-module
];
capsaicin.modules = [ garbage-collection-module ];
universal.home_modules = [

@@ -1,30 +1,40 @@
{
sucrose.modules = [
({pkgs, config, lib, ...}: {
services.gitea-actions-runner= {
package = pkgs.forgejo-runner;
instances = {
${config.networking.hostName} = {
enable = true;
hostPackages = with pkgs; lib.mkDefault [
bash
coreutils
curl
gawk
gitMinimal
gnused
wget
nix
nixfmt-rfc-style
];
labels = [ ];
name = config.networking.hostName;
settings = { };
tokenFile = config.sops.templates."forgejo_runner.env.secrets.yaml".path;
url = "https://git.collective-conciousness.monster";
(
{
pkgs,
config,
lib,
...
}:
{
services.gitea-actions-runner = {
package = pkgs.forgejo-runner;
instances = {
${config.networking.hostName} = {
enable = true;
hostPackages =
with pkgs;
lib.mkDefault [
bash
coreutils
curl
gawk
gitMinimal
gnused
wget
nix
nixfmt-rfc-style
];
labels = [ ];
name = config.networking.hostName;
settings = { };
tokenFile = config.sops.templates."forgejo_runner.env.secrets.yaml".path;
url = "https://git.collective-conciousness.monster";
};
};
};
};
})
}
)
];
}
}

@@ -18,7 +18,7 @@
password = "$MURMUR_LOGIN_PASSWORD";
port = 64738; # tcp/udp - this is the default, but i'm writing it out so i don't have to dig into the declaration every time i need to know which port it is.
openFirewall = false;
openFirewall = false;
registerName = "Adyya's cave of hushed tones";
registerUrl = "https://mumble.collective-conciousness.monster";
registerHostname = "mumble.collective-conciousness.monster"; # i vaguely know what the difference between these two is but it is a bit strange, i guess.

@@ -34,12 +34,15 @@
)
];
sucrose.modules = [
({config, ...}: {
sops.secrets."forgejo_runner_${config.networking.hostName}_token" = { };
sops.templates."forgejo_runner.env.secrets.yaml".content = ''
TOKEN=${config.sops.placeholder."forgejo_runner_${config.networking.hostName}_token"}
'';
})
(
{ config, ... }:
{
sops.secrets."forgejo_runner_${config.networking.hostName}_token" = { };
sops.templates."forgejo_runner.env.secrets.yaml".content = ''
TOKEN=${config.sops.placeholder."forgejo_runner_${config.networking.hostName}_token"}
'';
}
)
];
glucose.modules = [
({

@@ -126,45 +126,47 @@
"0:audio" = [ { title = "pwvucontrol$|noisetorch$"; } ];
};
/* colors = {
# should probably use a let ... in ... here
background = "#212121";
focused = {
# border = "#2b83a6"; # test to see if stylix does magic
background = "#2b83a6";
text = "#ffffff";
indicator = "#dddddd";
childBorder = "#2b83a6";
};
focusedInactive = {
border = "#212121";
/*
colors = {
# should probably use a let ... in ... here
background = "#212121";
text = "#86888c";
indicator = "#292d2e";
childBorder = "#5a5a5a";
focused = {
# border = "#2b83a6"; # test to see if stylix does magic
background = "#2b83a6";
text = "#ffffff";
indicator = "#dddddd";
childBorder = "#2b83a6";
};
focusedInactive = {
border = "#212121";
background = "#212121";
text = "#86888c";
indicator = "#292d2e";
childBorder = "#5a5a5a";
};
unfocused = {
border = "#212121";
background = "#212121";
text = "#86888c";
indicator = "#292d2e";
childBorder = "#5a5a5a";
};
urgent = {
border = "#d64e4e";
background = "#d64e4e";
text = "#ffffff";
indicator = "#d64e4e";
childBorder = "#d64e4e";
};
placeholder = {
border = "#212121";
background = "#0c0c0c";
text = "#ffffff";
indicator = "#212121";
childBorder = "#262626";
};
};
unfocused = {
border = "#212121";
background = "#212121";
text = "#86888c";
indicator = "#292d2e";
childBorder = "#5a5a5a";
};
urgent = {
border = "#d64e4e";
background = "#d64e4e";
text = "#ffffff";
indicator = "#d64e4e";
childBorder = "#d64e4e";
};
placeholder = {
border = "#212121";
background = "#0c0c0c";
text = "#ffffff";
indicator = "#212121";
childBorder = "#262626";
};
};*/
*/
bars = [
({
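On the "should probably use a let ... in ... here" note in the commented-out colours block above, a sketch of that refactor so each repeated colour is named once; the binding names are made up for illustration, the hex values are the ones from the comment:

colors =
  let
    grey = "#212121";
    dimText = "#86888c";
    accent = "#2b83a6";
    indicatorDim = "#292d2e";
    borderDim = "#5a5a5a";
  in
  {
    background = grey;
    focused = {
      background = accent;
      text = "#ffffff";
      indicator = "#dddddd";
      childBorder = accent;
    };
    focusedInactive = {
      border = grey;
      background = grey;
      text = dimText;
      indicator = indicatorDim;
      childBorder = borderDim;
    };
    # unfocused, urgent and placeholder collapse the same way.
  };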