commit ec9143f3ad (parent 9d2c1aa4df)
7 changed files with 126 additions and 107 deletions
@@ -6,4 +6,4 @@ jobs:
     name: "nix fmt"
     steps:
       - uses: actions/checkout@v4
-      - run: nixfmt $(find ./ -regex '.*.nix$') -- --check
+      - run: nix fmt $(find ./ -regex '.*.nix$') -- --check
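
Note on the change above: `nix fmt` delegates to the flake's `formatter` output, so this workflow assumes the repo's flake.nix exposes one. A minimal sketch of what that output could look like (the actual flake is not part of this diff, and the system string is an assumption):

  # hypothetical flake.nix excerpt -- not shown in this commit
  {
    inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    outputs = { self, nixpkgs, ... }: {
      # `nix fmt` resolves this output for the current system
      formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-rfc-style;
    };
  }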
@@ -1,43 +1,44 @@
 {
   cluster-testing.modules = [
     ({
-      /* services.k3s = { # just hogging resources at this moment
-      enable = true;
-      role = "server";
-      # token = ""; # Agent nodes are joined to the master node using a node-token which can be found on the master node at /var/lib/rancher/k3s/server/node-token.
-      clusterInit = true;
-      # allegedly you need different configs for non-starting nodes, including the ip of a server. you should handle this within nix, preferrably -e
-      # allegedly: " If you are configuring an HA cluster with an embedded etcd, the 1st server must have clusterInit = true and other servers must connect to it using serverAddr. " # I think you can get around this kinda by pointing to a domain, so that if the server with the address specified in the config fails, others take the request. i am not sure about the details of the implementation - i.e how to do it without giving authority to a specific node. This is more of a theoretical problem, i think, since this only matters when a node starts up and gets to be part of the cluster - after it's included i'm pretty sure it would be fine? Might need to do some testing -e
-      # this kinda makes sense? like otherwise how would the new clusters know where to connect to ? Because it uses raft, the serverAddr doesn't necessarily have to be the one with clusterInit, as, according to the Raft specification, calls to followers get forwarded to the leader node. -e
-      extraFlags = [
-      # "--flannel-backend none"
-      # "--disable-network-policy"
-      # "--no-deploy traefik"
-      ]; # --flannel-backend-none and --disable-network-policy prepare the cluster for cillium, which, as far as i can see, i need to install imperatively because it isn't a service or packaged within nixpkgs. The command used is `cilium install --version 1.x.x --set=ipam.operator.clusterPoolIPv4PodCIDRList="10.42.0.0/16"`,replace the x's with whatever version you need, as of 2024.09.20 1.16.1 is the latest (released on the 14th of August 2024, according to their github). Godspeed to future addy if we decide to do package it ourselves or something. -e
-      # configPath = ./k3s.yaml;
-      }; # decided to try stock kubernetes since k3s doesn't seem to be working as i intend --- a week later --- YOU BUMBLING MORON YOU ARE ON UNSTABLE AND YOU WERE LOOKING AT 24.05 DOCS
       /*
-      services.kubernetes = {
-      # flannel.enable = false;
-      roles = [ "master" "node" ];
-      masterAddress = "10.12.96.4";
-      #apiserverAddress = "10.12.96.4:6443";
-      kubelet = {
-      enable = true;
-      extraOpts = "--fail-swap-on=false";
-      };
-      scheduler.enable = true;
-      apiserver ={
-      enable = true;
-      advertiseAddress = "10.12.96.4";
-      securePort = 6443;
-      };
-      easyCerts = true;
-      pki.enable = true;
-      addons.dns.enable = true;
-      controllerManager.enable = true;
-      addonManager.enable = true;
-      }; #chat is this factual
+        services.k3s = { # just hogging resources at this moment
+          enable = true;
+          role = "server";
+          # token = ""; # Agent nodes are joined to the master node using a node-token which can be found on the master node at /var/lib/rancher/k3s/server/node-token.
+          clusterInit = true;
+          # allegedly you need different configs for non-starting nodes, including the ip of a server. you should handle this within nix, preferrably -e
+          # allegedly: " If you are configuring an HA cluster with an embedded etcd, the 1st server must have clusterInit = true and other servers must connect to it using serverAddr. " # I think you can get around this kinda by pointing to a domain, so that if the server with the address specified in the config fails, others take the request. i am not sure about the details of the implementation - i.e how to do it without giving authority to a specific node. This is more of a theoretical problem, i think, since this only matters when a node starts up and gets to be part of the cluster - after it's included i'm pretty sure it would be fine? Might need to do some testing -e
+          # this kinda makes sense? like otherwise how would the new clusters know where to connect to ? Because it uses raft, the serverAddr doesn't necessarily have to be the one with clusterInit, as, according to the Raft specification, calls to followers get forwarded to the leader node. -e
+          extraFlags = [
+            # "--flannel-backend none"
+            # "--disable-network-policy"
+            # "--no-deploy traefik"
+          ]; # --flannel-backend-none and --disable-network-policy prepare the cluster for cillium, which, as far as i can see, i need to install imperatively because it isn't a service or packaged within nixpkgs. The command used is `cilium install --version 1.x.x --set=ipam.operator.clusterPoolIPv4PodCIDRList="10.42.0.0/16"`,replace the x's with whatever version you need, as of 2024.09.20 1.16.1 is the latest (released on the 14th of August 2024, according to their github). Godspeed to future addy if we decide to do package it ourselves or something. -e
+          # configPath = ./k3s.yaml;
+        }; # decided to try stock kubernetes since k3s doesn't seem to be working as i intend --- a week later --- YOU BUMBLING MORON YOU ARE ON UNSTABLE AND YOU WERE LOOKING AT 24.05 DOCS
+        /*
+        services.kubernetes = {
+          # flannel.enable = false;
+          roles = [ "master" "node" ];
+          masterAddress = "10.12.96.4";
+          #apiserverAddress = "10.12.96.4:6443";
+          kubelet = {
+            enable = true;
+            extraOpts = "--fail-swap-on=false";
+          };
+          scheduler.enable = true;
+          apiserver ={
+            enable = true;
+            advertiseAddress = "10.12.96.4";
+            securePort = 6443;
+          };
+          easyCerts = true;
+          pki.enable = true;
+          addons.dns.enable = true;
+          controllerManager.enable = true;
+          addonManager.enable = true;
+        }; #chat is this factual
       */
     })
   ];
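
The HA comments in the hunk above leave open what a non-initial server's config would look like. A minimal sketch under the quoted rule (the first server sets clusterInit = true, later servers point at an existing one via serverAddr); the address and token path below are placeholders, not values from this repo:

  # hypothetical follower-server module -- not part of this commit
  { ... }:
  {
    services.k3s = {
      enable = true;
      role = "server";
      # any live server works as serverAddr: per the comment above, raft
      # forwards calls from followers to the current leader
      serverAddr = "https://10.12.96.4:6443";
      # token taken from /var/lib/rancher/k3s/server/node-token on the first server
      tokenFile = "/run/secrets/k3s-token";
    };
  }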
@@ -220,7 +220,10 @@ in
   ];
   fructose.modules = [ garbage-collection-module ];
   menthol.modules = [ distributed-build-module ];
-  aspartame.modules = [ distributed-build-module garbage-collection-module ];
+  aspartame.modules = [
+    distributed-build-module
+    garbage-collection-module
+  ];
   capsaicin.modules = [ garbage-collection-module ];

   universal.home_modules = [
@@ -1,30 +1,40 @@
 {
   sucrose.modules = [
-    ({pkgs, config, lib, ...}: {
-      services.gitea-actions-runner= {
-        package = pkgs.forgejo-runner;
-        instances = {
-          ${config.networking.hostName} = {
-            enable = true;
-            hostPackages = with pkgs; lib.mkDefault [
-              bash
-              coreutils
-              curl
-              gawk
-              gitMinimal
-              gnused
-              wget
-              nix
-              nixfmt-rfc-style
-            ];
-            labels = [ ];
-            name = config.networking.hostName;
-            settings = { };
-            tokenFile = config.sops.templates."forgejo_runner.env.secrets.yaml".path;
-            url = "https://git.collective-conciousness.monster";
-          };
-        };
-      };
-    })
+    (
+      {
+        pkgs,
+        config,
+        lib,
+        ...
+      }:
+      {
+        services.gitea-actions-runner = {
+          package = pkgs.forgejo-runner;
+          instances = {
+            ${config.networking.hostName} = {
+              enable = true;
+              hostPackages =
+                with pkgs;
+                lib.mkDefault [
+                  bash
+                  coreutils
+                  curl
+                  gawk
+                  gitMinimal
+                  gnused
+                  wget
+                  nix
+                  nixfmt-rfc-style
+                ];
+              labels = [ ];
+              name = config.networking.hostName;
+              settings = { };
+              tokenFile = config.sops.templates."forgejo_runner.env.secrets.yaml".path;
+              url = "https://git.collective-conciousness.monster";
+            };
+          };
+        };
+      }
+    )
   ];
 }
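
One detail worth calling out in the runner module above: hostPackages is wrapped in lib.mkDefault, so an individual host module can replace the list at normal option priority, without mkForce. A minimal sketch of such an override, assuming a hypothetical host whose runner additionally needs docker:

  # hypothetical per-host override -- not part of this commit
  { pkgs, config, ... }:
  {
    # wins over the mkDefault list because it is set at default priority
    services.gitea-actions-runner.instances.${config.networking.hostName}.hostPackages =
      with pkgs; [ bash coreutils curl gitMinimal nix docker ];
  }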
sops.mod.nix (15 lines changed)
@@ -34,12 +34,15 @@
     )
   ];
   sucrose.modules = [
-    ({config, ...}: {
-      sops.secrets."forgejo_runner_${config.networking.hostName}_token" = { };
-      sops.templates."forgejo_runner.env.secrets.yaml".content = ''
-        TOKEN=${config.sops.placeholder."forgejo_runner_${config.networking.hostName}_token"}
-      '';
-    })
+    (
+      { config, ... }:
+      {
+        sops.secrets."forgejo_runner_${config.networking.hostName}_token" = { };
+        sops.templates."forgejo_runner.env.secrets.yaml".content = ''
+          TOKEN=${config.sops.placeholder."forgejo_runner_${config.networking.hostName}_token"}
+        '';
+      }
+    )
   ];
   glucose.modules = [
     ({
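
For context on the pattern above: under sops-nix, config.sops.placeholder."<name>" evaluates to an opaque marker, and at activation time the template is rendered with the marker replaced by the decrypted secret, landing at config.sops.templates."<name>".path — which is exactly what the runner's tokenFile consumes. A minimal standalone sketch with a hypothetical secret name:

  # hypothetical example -- not part of this commit
  { config, ... }:
  {
    sops.secrets."example_token" = { };
    sops.templates."example.env".content = ''
      TOKEN=${config.sops.placeholder."example_token"}
    '';
    # consumers read the rendered file at config.sops.templates."example.env".path
  }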
sway.mod.nix (76 lines changed)
@@ -126,45 +126,47 @@
       "0:audio" = [ { title = "pwvucontrol$|noisetorch$"; } ];
     };

-    /* colors = {
-    # should probably use a let ... in ... here
-    background = "#212121";
-    focused = {
-    # border = "#2b83a6"; # test to see if stylix does magic
-    background = "#2b83a6";
-    text = "#ffffff";
-    indicator = "#dddddd";
-    childBorder = "#2b83a6";
-    };
-    focusedInactive = {
-    border = "#212121";
-    background = "#212121";
-    text = "#86888c";
-    indicator = "#292d2e";
-    childBorder = "#5a5a5a";
-    };
-    unfocused = {
-    border = "#212121";
-    background = "#212121";
-    text = "#86888c";
-    indicator = "#292d2e";
-    childBorder = "#5a5a5a";
-    };
-    urgent = {
-    border = "#d64e4e";
-    background = "#d64e4e";
-    text = "#ffffff";
-    indicator = "#d64e4e";
-    childBorder = "#d64e4e";
-    };
-    placeholder = {
-    border = "#212121";
-    background = "#0c0c0c";
-    text = "#ffffff";
-    indicator = "#212121";
-    childBorder = "#262626";
-    };
-    };*/
+    /*
+      colors = {
+        # should probably use a let ... in ... here
+        background = "#212121";
+        focused = {
+          # border = "#2b83a6"; # test to see if stylix does magic
+          background = "#2b83a6";
+          text = "#ffffff";
+          indicator = "#dddddd";
+          childBorder = "#2b83a6";
+        };
+        focusedInactive = {
+          border = "#212121";
+          background = "#212121";
+          text = "#86888c";
+          indicator = "#292d2e";
+          childBorder = "#5a5a5a";
+        };
+        unfocused = {
+          border = "#212121";
+          background = "#212121";
+          text = "#86888c";
+          indicator = "#292d2e";
+          childBorder = "#5a5a5a";
+        };
+        urgent = {
+          border = "#d64e4e";
+          background = "#d64e4e";
+          text = "#ffffff";
+          indicator = "#d64e4e";
+          childBorder = "#d64e4e";
+        };
+        placeholder = {
+          border = "#212121";
+          background = "#0c0c0c";
+          text = "#ffffff";
+          indicator = "#212121";
+          childBorder = "#262626";
+        };
+      };
+    */

     bars = [
     ({
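
On the "test to see if stylix does magic" comment above: commenting out the hand-written palette is consistent with letting stylix derive sway's color scheme from a base16 theme instead. A minimal, hypothetical sketch of that route (neither the option values nor the wallpaper path come from this repo):

  # hypothetical stylix setup -- not part of this commit
  {
    stylix = {
      enable = true;
      # stylix can generate a base16 scheme from an image and apply it to sway
      image = ./wallpaper.png;
    };
  }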