-rw-r--r--  Dockerfile | 8
-rw-r--r--  assets/theme/main.scss | 93
-rw-r--r--  content/_index.md | 22
-rw-r--r--  content/posts/2018/2018-05-07-sparrowdo-getting-started.md | 2
-rw-r--r--  content/posts/2022/2022-08-06-installing-gentoo-encrypted-zfs-efistub.md | 242
-rw-r--r--  content/posts/2023/2023-02-23-the-woes-of-awsvpnclient.md | 91
-rw-r--r--  content/posts/2023/2023-03-08-using-laminar-for-selfhosted-ci.md | 64
-rw-r--r--  content/posts/2023/2023-03-26-finally-templating-bashtard.md | 86
-rw-r--r--  content/posts/2023/2023-05-23-bashtard-2.0.0.md | 110
-rw-r--r--  content/posts/2023/2023-07-13-getting-emoji-to-work-in-kde-on-debian.md | 138
-rw-r--r--  content/posts/2023/2023-07-24-new-server-rack-mieshu.md | 89
-rw-r--r--  content/posts/2023/2023-08-05-new-server-rack-nouki.md | 88
-rw-r--r--  content/posts/2023/2023-08-29-releasing-raku-modules-with-fez.md | 74
-rw-r--r--  content/posts/2023/_index.md | 3
-rw-r--r--  content/projects/_index.md | 7
-rw-r--r--  content/projects/bashtard/_index.md | 17
-rw-r--r--  content/projects/bashtard/releases/1.0.0.md | 9
-rw-r--r--  content/projects/bashtard/releases/2.0.0.md | 68
-rw-r--r--  content/projects/bashtard/releases/_index.md | 4
-rw-r--r--  content/recipes/condiments/applesauce.md | 2
-rw-r--r--  content/recipes/condiments/mayonnaise.md | 40
-rw-r--r--  content/recipes/condiments/salsa.md | 59
-rw-r--r--  content/recipes/condiments/sauce-mushroom.md | 65
-rw-r--r--  content/recipes/dishes-hot/soup-mushroom-cream.md | 68
-rw-r--r--  content/recipes/dishes-hot/soup-pea-halal.md | 86
-rw-r--r--  content/recipes/dishes-hot/stew-dutch.md | 10
-rw-r--r--  content/recipes/dishes-side/salad-stewed-beaf.md | 133
-rw-r--r--  content/recipes/dishes-side/stewed-pears.md | 50
-rw-r--r--  content/recipes/snacks/cheesecake-basque-burned.md | 9
-rw-r--r--  content/services/_index.md | 3
-rw-r--r--  content/services/fiche.md | 16
-rw-r--r--  content/services/invidious.md | 8
-rw-r--r--  content/services/nitter.md | 3
-rw-r--r--  content/services/omgur.md | 5
-rw-r--r--  content/services/searx.md | 8
-rw-r--r--  content/services/searxng.md | 9
-rw-r--r--  content/services/teddit.md | 3
-rw-r--r--  layouts/_default/baseof.html | 8
-rw-r--r--  layouts/posts/list.html | 8
-rw-r--r--  layouts/posts/list.xml | 2
-rw-r--r--  layouts/posts/single.html | 15
-rw-r--r--  layouts/project-release/list.html | 14
-rw-r--r--  layouts/project-release/list.xml | 27
-rw-r--r--  layouts/project-release/single.html | 20
-rw-r--r--  layouts/projects/list.html | 21
-rw-r--r--  layouts/recipes/list.html | 33
-rw-r--r--  layouts/recipes/single.html | 19
-rw-r--r--  layouts/services/list.html | 17
48 files changed, 1898 insertions, 78 deletions
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d909803
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,8 @@
+FROM alpine
+
+RUN apk add --no-cache hugo
+
+WORKDIR /usr/src
+COPY . .
+
+CMD [ "hugo", "serve", "--baseURL", "https://www.tyil.nl", "--bind", "::" ]
diff --git a/assets/theme/main.scss b/assets/theme/main.scss
index 3221dd3..967e0da 100644
--- a/assets/theme/main.scss
+++ b/assets/theme/main.scss
@@ -1,7 +1,6 @@
// Variables
$mainWidth: 900px;
-
// Colors
@import url("/css/highlight-emacs.css") screen and (prefers-color-scheme: light);
@import url("/css/highlight-monokai.css") screen and (prefers-color-scheme: dark);
@@ -109,6 +108,77 @@ section.admonition {
}
}
+section.project {
+ border-top: 1px solid var(--bodyForegroundColor);
+ padding: 0.5rem 0.5rem;
+
+ h2 {
+ margin: 0.5rem 0rem;
+
+ small {
+ margin-left: 1.5rem;
+ font-weight: normal;
+ font-size: small;
+ }
+ }
+
+ p {
+ margin-top: 0;
+ }
+
+ nav.project {
+ a, a:visited {
+ margin: 0.5rem;
+ text-decoration: underline;
+ color: var(--bodyForegroundColor);
+ }
+
+ a::before {
+    content: "» ";
+ }
+ }
+}
+
+section.recipe {
+ border-top: 1px solid var(--bodyForegroundColor);
+ padding: 0.5rem;
+ display: grid;
+ grid-template-columns: [preview] 1fr [description] 5fr [end];
+
+ div.picture {
+ grid-column-start: preview;
+ grid-column-end: description;
+ }
+
+ div.description {
+ grid-column-start: description;
+ grid-column-end: end;
+ }
+
+ h2 {
+ margin: 0.5rem 0rem;
+ }
+}
+
+section.service {
+ border-top: 1px solid var(--bodyForegroundColor);
+ padding: 0.5rem;
+
+ h2 {
+ margin: 0.5rem 0rem;
+
+ small {
+ margin-left: 1.5rem;
+ font-weight: normal;
+ font-size: small;
+ }
+ }
+
+ p {
+ margin-top: 0;
+ }
+}
+
@media(min-width: $mainWidth) {
section.admonition {
display: grid;
@@ -145,7 +215,7 @@ section.admonition {
}
// Navigation bar
-nav {
+nav.main {
padding-bottom: 8px;
border-bottom: double var(--bodyForegroundColor);
text-align: center;
@@ -222,3 +292,22 @@ pre code {
margin-right: 1em;
}
}
+
+.tag {
+ color: var(--linkForegroundColor);
+ font-family: monospace;
+ text-decoration: none;
+ border-bottom: 1px dotted var(--linkForegroundColor);
+}
+.tag::before {
+ content: "#";
+}
+
+ul.taglist {
+ li {
+ display: inline;
+ }
+
+ margin: 0;
+ padding: 0;
+}
diff --git a/content/_index.md b/content/_index.md
index c5c62ad..ec0ccd7 100644
--- a/content/_index.md
+++ b/content/_index.md
@@ -44,6 +44,12 @@ I do not read my mailboxes very often, so please do not expect a timely
response. If you require a response as soon as possible, please find me on IRC
instead.
+#### Fediverse
+
+I host my own MissKey instance to interact with the wider Fediverse.
+
+- [`@tyil@fedi.tyil.nl`](https://fedi.tyil.nl/@tyil)
+
#### IRC
I am active on various IRC networks, most often under the nick `tyil`. All of
@@ -57,10 +63,18 @@ wish to have a real-time chat with me.
#### Matrix
-While I'm not fully convinced of Matrix yet, I have an active account on the
-network. I'd like to eventually self-host a homeserver, but for that to happen I
-would first like it to be actually good. For now, you can find me as
-`@tyil:matrix.org`.
+As the years have gone by, I've been losing faith in Matrix more and more. I
+still have an account, and I would be happy if it ever got good, but I
+personally am not counting on that to happen anymore.
+
+- `@tyil:matrix.org`
+
+#### XMPP
+
+If IRC is not your thing, I can be reached for personal chats on XMPP too.
+
+- `tyil@disroot.org`
+- `tyil@chat.tyil.nl`
## Other links
diff --git a/content/posts/2018/2018-05-07-sparrowdo-getting-started.md b/content/posts/2018/2018-05-07-sparrowdo-getting-started.md
index fa458c4..419e98d 100644
--- a/content/posts/2018/2018-05-07-sparrowdo-getting-started.md
+++ b/content/posts/2018/2018-05-07-sparrowdo-getting-started.md
@@ -210,7 +210,7 @@ sparrowdo --local_mode
{{< admonition title="note" >}}
If you want to run this on a remote machine to configure that one instead, you
can use `--host=<ip>` instead of `--local_mode`.
-{{< admonition >}}
+{{< / admonition >}}
You can check whether it actually worked by inspecting the files in
`/etc/dnsmasq.d` and your `/etc/resolv.conf`. The easiest way to check their
diff --git a/content/posts/2022/2022-08-06-installing-gentoo-encrypted-zfs-efistub.md b/content/posts/2022/2022-08-06-installing-gentoo-encrypted-zfs-efistub.md
new file mode 100644
index 0000000..2825b7c
--- /dev/null
+++ b/content/posts/2022/2022-08-06-installing-gentoo-encrypted-zfs-efistub.md
@@ -0,0 +1,242 @@
+---
+date: 2022-11-20
+title: "Installing Gentoo with encrypted ZFS rootfs and EFIstub kernel"
+tags:
+- GNU+Linux
+- Gentoo
+- Tutorial
+- ZFS
+---
+
+A little while ago, I got a new work laptop. As is customary, I installed my
+preferred GNU+Linux environment onto it. Since then, a few people have asked
+me to detail my steps to get this system up and running, as they would like to
+try out a similar setup themselves. It's also been a while since I made another
+blog post, so here's killing two birds with one stone!
+
+## Preparing disks
+
+Make sure you get the right device name, or you'll purge the data on some other
+drive!
+
+```sh
+parted -a optimal /dev/nvme1n1
+mklabel gpt
+mkpart esp 1 5130
+mkpart rootfs 5130 -1
+set 1 boot on
+quit
+```
+
+### Get IDs of partitions
+
+For partitioning I've lately come to love using disk IDs, rather than their
+`/dev/sd*` entries. They're easy to look up, so copy them over to use them later
+on.
+
+```sh
+ls -l /dev/disk/by-id
+```
+
+- `nvme-eui.36483331545090280025385800000001-part1` -> ESP
+- `nvme-eui.36483331545090280025385800000001-part2` -> ZFS
+
+### Formatting
+
+#### ESP
+
+The ESP partition holds the kernel and initramfs, and _must_ be FAT32.
+
+```sh
+mkfs.vfat -F32 /dev/disk/by-id/nvme-eui.36483331545090280025385800000001-part1
+```
+
+#### zpool
+
+The zpool settings below are the ones I used. You should verify that these
+settings are also optimal for your setup! I generally name my pools after the
+machine they run in, in this case `ivdea`, which gives the pool name `ivdea0`.
+Any name will work here, just make sure to be consistent later down the guide!
+
+```sh
+rm -f /etc/hostid && zgenhostid
+
+zpool create -f \
+ -O acltype=posixacl \
+ -O compression=lz4 \
+ -O dedup=off \
+ -O encryption=aes-256-gcm \
+ -O keyformat=passphrase \
+ -O keylocation=prompt \
+ -O relatime=on \
+ -O xattr=sa \
+ -R /mnt/gentoo \
+ -m none \
+ -o ashift=12 \
+ -o cachefile=/etc/zfs/zpool.cache \
+ ivdea0 \
+ /dev/disk/by-id/nvme-eui.36483331545090280025385800000001-part2
+
+zfs create -o mountpoint=none ivdea0/rootfs
+zfs create -o mountpoint=/ ivdea0/rootfs/gentoo
+zfs create -o mountpoint=none ivdea0/rootfs/gentoo/usr
+zfs create -o mountpoint=none ivdea0/rootfs/gentoo/var
+zfs create -o mountpoint=none ivdea0/rootfs/gentoo/var/lib
+zfs create -o mountpoint=none ivdea0/home
+zfs create -o mountpoint=/home/tyil ivdea0/home/tyil
+
+zpool set bootfs=ivdea0/rootfs/gentoo ivdea0
+```
+
+## Preparing chroot
+
+You will want to grab the latest Gentoo autobuild tarball for your architecture.
+I'm _not_ using systemd; if you desire that for some reason, you may need to
+alter some steps.
+
+### Initial
+
+```sh
+cd /mnt/gentoo
+mkdir efi
+mount /dev/disk/by-id/nvme-eui.36483331545090280025385800000001-part1 efi
+wget $STAGE3 # Use whichever URL for the stage3 tarball you need
+tar xpf stage3*.tar.xz --xattrs-include='*.*' --numeric-owner
+```
+
+### Recovery
+
+This section is labeled "Recovery" to easily find it later, in case you need to
+go back into the chroot to fix up any issues that prevent you from booting it.
+
+```sh
+mkdir -p etc/zfs
+cp /etc/zfs/zpool.cache etc/zfs
+cp --dereference /etc/resolv.conf /mnt/gentoo/etc/
+mount -t proc /proc proc
+mount --rbind --make-rslave /sys sys
+mount --rbind --make-rslave /dev dev
+mount --rbind --make-rslave /run run
+chroot . /bin/bash -l
+```
+
+## Configuring the system
+
+The base system is now installed, and most of the following steps are for
+configuring it to actually work properly.
+
+### Portage
+
+Run the initial Portage tree download. This will use `webrsync`; you can
+configure it to use `git` at a later stage if desired.
+
+```sh
+mkdir -p /etc/portage/repos.conf
+cp /usr/share/portage/config/repos.conf /etc/portage/repos.conf/gentoo.conf
+emerge-webrsync
+```
+
+### Editor
+
+Of course, you can stick to `nano`, but I've been a vim guy for a very long time
+now, and without it I feel sad. It is the first thing I install, to make the
+rest of the configuration easier to do, by virtue of having the best editor
+available.
+
+```sh
+emerge vim
+```
+
+Once `vim` (or whichever worse editor you prefer) is installed, you can go
+around editing configuration files as needed.
+
+### locale
+
+Enable all the locales you desire in `/etc/locale.gen`. Once all the desired
+locales are uncommented, you can generate the locales with `locale-gen`. You
+will most likely also want to add the locales to the `L10N` variable in your
+`make.conf`.
+
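+As a minimal illustration, assuming you only want a US English UTF-8 locale and
+`L10N` is not already set in your `make.conf`:
+
+```sh
+echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen  # or simply uncomment the line
+locale-gen
+echo 'L10N="en"' >> /etc/make.conf
+```
+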
+### timezone
+
+Set your timezone by making `/etc/localtime` a symlink to the timezone you use.
+
+```sh
+ln -fs /usr/share/zoneinfo/Europe/Amsterdam /etc/localtime
+```
+
+### hostname
+
+Set the machine's short hostname in `/etc/conf.d/hostname` first, then add your
+hostname aliases to `/etc/hosts`.
+
+```txt
+# /etc/conf.d/hostname
+hostname="ivdea"
+
+# /etc/hosts
+127.0.0.1 ivdea.tyil.net ivdea
+::1 ivdea.tyil.net ivdea
+```
+
+### kernel
+
+{{< admonition title="Note" >}}
+This will build the initramfs twice, since emerging gentoo-kernel will build it
+automagically. This can be "fixed" by removing a USE flag, but this is easier
+for me.
+{{</ admonition >}}
+
+By the time you're reading this, the kernel version used here is probably
+outdated. You will want to update it to whichever kernel version you're going to
+use.
+
+```sh
+emerge \
+ busybox \
+ dracut \
+ efibootmgr \
+ gentoo-kernel \
+ intel-microcode \
+ linux-firmware
+
+emerge sys-fs/zfs-kmod sys-fs/zfs
+emerge --config gentoo-kernel
+
+rc-update add zfs-import boot
+rc-update add zfs-mount boot
+rc-update add zfs-share default
+rc-update add zfs-zed default
+
+zgenhostid
+
+mkdir -p /efi/efi/gentoo  # the freshly formatted ESP does not have this directory yet
+cp /boot/vmlinuz-5.15.59-gentoo-dist /efi/efi/gentoo/vmlinuz-5.15.59-gentoo-dist.efi
+cp /boot/initramfs-5.15.59-gentoo-dist.img /efi/efi/gentoo/initramfs-5.15.59-gentoo-dist.img
+
+efibootmgr \
+ --disk /dev/disk/by-id/nvme-eui.36483331545090280025385800000001 \
+ --part 1 \
+ --create \
+ --label "Gentoo ZFS 5.15.59" \
+ --loader 'efi\gentoo\vmlinuz-5.15.59-gentoo-dist.efi' \
+ --unicode \
+ 'dozfs root=ZFS=ivdea0/rootfs/gentoo ro initrd=\efi\gentoo\initramfs-5.15.59-gentoo-dist.img encrypted'
+```
+
+### Root password
+
+Set the root password using `passwd`. This would also be a good time to add any
+other users you want to use, and configure them with the correct permissions and
+groups.
+
+## Misc
+
+If you have any other software requirements, such as wireless network management
+or privilege escalation utilities, this is the most appropriate time to install
+and configure them.
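+
+For example (the package choices below are merely my own suggestions; pick
+whatever you actually need):
+
+```sh
+emerge --ask \
+    app-admin/doas \
+    net-misc/dhcpcd \
+    net-wireless/iwd
+```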
+
+## Reboot
+
+Now you can reboot into the system, and be done with this guide. If anything
+isn't working properly, return to the "Recovery" step and fix any outstanding
+issues.
diff --git a/content/posts/2023/2023-02-23-the-woes-of-awsvpnclient.md b/content/posts/2023/2023-02-23-the-woes-of-awsvpnclient.md
new file mode 100644
index 0000000..a852793
--- /dev/null
+++ b/content/posts/2023/2023-02-23-the-woes-of-awsvpnclient.md
@@ -0,0 +1,91 @@
+---
+date: 2023-02-23
+title: The Woes of AWSVPNClient
+tags:
+- Amazon
+- AWS
+- AWSVPNClient
+---
+
+For my current `$dayjob` I am required to start using the AWS VPN Client. This
+is not a problem per se; however, this piece of software has given me some
+particular headaches. In this post, I want to air some frustrations that it has
+brought me in the past two days, trying to get this software working properly
+on Debian.
+
+## GNU+Linux Support
+
+AWS provides an official VPN client for GNU+Linux users. Not all of them,
+sadly: they specifically support only Ubuntu 18.04. I find it important to
+note that this is 2 LTS versions behind the current Ubuntu version, 22.04.
+Apart from that, supporting only Ubuntu is rather limiting. Amazon isn't a
+small company, and they should be able to support various distributions.
+
+In general I would recommend supporting the upstream distribution, which in
+this case would be Debian. This would ensure that it becomes available on
+Ubuntu as well, by virtue of Ubuntu being Debian-based.
+
+That said, Ubuntu-only packages wouldn't be a huge problem if not for the next
+issue I have with this software...
+
+## Proprietary Software
+
+The code for this application is private, and Amazon has no intention to change
+this. There's nothing very special about the application; it's just a
+proprietary wrapper around OpenVPN, so I find it hard to believe
+that they're trying to "protect" anything sensitive. It feels like a simple
+move to instill the idea that you're highly dependent on them.
+
+If they _were_ to make this software free (as in freedom), packaging could be
+done by package maintainers, or really just anyone who feels like doing it.
+This would remove a burden on Amazon, and ensure better availability for all
+potential users.
+
+Additionally, it would make debugging issues much easier. Because...
+
+## Logging
+
+The logging the application does is pathetic. There's a lot of duplicated logs
+that are spammed hundreds of times per second. Tailing your logs can also be
+more annoying than it needs to be, since the client rotates which file it logs
+to every 1048629 bytes.
+
+I currently have 30 log files, generated by two sessions. In these log files,
+the line `[INF] Begin receive init again` appears 509114 times. Over _half a
+million_ times. The total number of log lines in all these log files is 510394,
+meaning only 1280 lines are something different.
+
+Of those 1280 lines, the logs themselves aren't much better. I apparently had
+to install `systemd-resolved` in order to fix the following error:
+
+```txt
+2023-02-23 10:02:50.870 +01:00 [DBG] CM received: >LOG:1677142970,F,WARNING: Failed running command (--up/--down): external program exited with error status: 1
+>FATAL:WARNING: Failed running command (--up/--down): external program exited with error status: 1
+
+2023-02-23 10:02:50.870 +01:00 [DBG] CM processsing: >LOG:1677142970,F,WARNING: Failed running command (--up/--down): external program exited with error status: 1
+2023-02-23 10:02:50.870 +01:00 [DBG] CM processsing: >FATAL:WARNING: Failed running command (--up/--down): external program exited with error status: 1
+2023-02-23 10:02:50.870 +01:00 [DBG] Fatal exception occured
+2023-02-23 10:02:50.870 +01:00 [DBG] Stopping openvpn process
+2023-02-23 10:02:50.870 +01:00 [DBG] Sending SIGTERM to gracefully shut down the OpenVPN process
+2023-02-23 10:02:50.871 +01:00 [DBG] Invoke Error
+2023-02-23 10:02:50.871 +01:00 [DBG] DeDupeProcessDiedSignals: OpenVPN process encountered a fatal error and died. Try connecting again.
+```
+
+It is not particularly clear this fails due to not having `systemd-resolved`
+installed and running. The `.deb` provided by Amazon does not even depend on
+`systemd-resolved`!
+
+Another gripe I've had with the logs is their location. It saves these in
+`~/.config/AWSVPNClient/logs`. It may seem weird since this path contains a
+directory named `.config`, and indeed, this is not a great place to store logs.
+The [XDG Base Directory
+Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html)
+specifies `$XDG_STATE_HOME`, with one explicit example for it being logs.
+However, for this to make sense, the application needs to respect the `XDG_*`
+values to begin with, which it currently doesn't.
+
+## All in all
+
+This software is pretty bad, but if it were free software, at least the users
+could improve it to suck less, and easily introduce support for various
+additional platforms. Instead, we're just stuck with a piece of bad software.
diff --git a/content/posts/2023/2023-03-08-using-laminar-for-selfhosted-ci.md b/content/posts/2023/2023-03-08-using-laminar-for-selfhosted-ci.md
new file mode 100644
index 0000000..7158ed1
--- /dev/null
+++ b/content/posts/2023/2023-03-08-using-laminar-for-selfhosted-ci.md
@@ -0,0 +1,64 @@
+---
+date: 2023-03-08
+title: Using Laminar for Self-hosted CI
+tags:
+- Bash
+- CI
+- Git
+- GNU+Linux
+---
+
+I've hosted my [own git repositories](https://git.tyil.nl) for quite a while,
+but I hadn't found a simple self-hosted CI solution yet. I've tried several,
+and found them to be a bit too cumbersome to set up and actually put to use. The
+majority requires you to host a full "git forge", such as GitLab or Gitea, in
+order to use their webhook functionality to trigger a CI build. This
+didn't seem worth the effort to me, so I kept looking for an alternative that
+worked well for me.
+
+I think I've finally found one in [Laminar](https://laminar.ohwg.net/), after a
+suggestion from a friend on the Fediverse. I do wonder how I could've spent so
+much time searching without ever finding this solution!
+
+Laminar itself was easy to install from source, but another person chimed in to
+let me know they already made an `ebuild` for it, which is available in their
+overlay, making it even easier for me to try out. A single `emerge laminar`,
+and a couple seconds of building it, and I was ready to start trying it out.
+
+Configuration of jobs is done through scripts in whichever language you prefer,
+giving you quite a bit of power. The documentation seems to mostly use Bash,
+and that seemed to be a logical choice for me too, so that's what I've been
+playing with as well.
+
+Running jobs itself is as easy as `laminarc queue <name>`. It can't be much
+simpler, and this CLI interface makes it very easy to start a new job from a
+git `post-receive` hook. I wrote one which also shows the URL of the job's logs
+whenever I push new comments to [the Bashtard
+repository](https://git.tyil.nl/bashtard/about/).
+
+{{<highlight bash>}}
+while read old new ref
+do
+ laminarc queue bashtard \
+ "GIT_BRANCH=$ref" \
+ "GIT_COMMIT=$new" \
+ | awk -F: '{ print "https://ci.tyil.nl/jobs/"$1"/"$2 }'
+done
+{{</highlight>}}
+
+Using this, I can verify a job started, and immediately go to the page that
+shows the logs. I plan to use Laminar's post-job script to leverage `ntfy` to
+send me a notification on failed builds.
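+
+Something like the sketch below should do. It assumes Laminar's per-job
+`.after` script and its `$JOB`, `$RUN`, and `$RESULT` environment variables,
+and the ntfy topic URL is a made-up placeholder.
+
+{{<highlight bash>}}
+#!/usr/bin/env bash
+# /var/lib/laminar/cfg/jobs/bashtard.after (sketch)
+if [[ "$RESULT" != "success" ]]
+then
+    curl \
+        -H "Title: Laminar job $JOB #$RUN failed" \
+        -d "https://ci.tyil.nl/jobs/$JOB/$RUN" \
+        "https://ntfy.example.org/builds"
+fi
+{{</highlight>}}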
+
+Since all the worthwhile configuration for Laminar is just plain text, it is
+also very easy to manage in your preferred configuration management system,
+which is something I plan to do in the near future.
+
+One slight annoyance I have so far is that I can't use (sub)directories for all
+the job scripts. Since I don't have many yet, this isn't a real problem,
+but it could pose a minor issue in the future once I've written more job
+scripts.
+
+Given that that's the only "issue" I've found thus far, after a couple days of
+playing with it, I'd highly recommend taking a look at it if you want to set up
+a CI system for your self-hosted git repositories!
diff --git a/content/posts/2023/2023-03-26-finally-templating-bashtard.md b/content/posts/2023/2023-03-26-finally-templating-bashtard.md
new file mode 100644
index 0000000..b80270c
--- /dev/null
+++ b/content/posts/2023/2023-03-26-finally-templating-bashtard.md
@@ -0,0 +1,86 @@
+---
+date: 2023-03-29
+title: Finally, Templating in Bashtard!
+tags:
+- Bash
+- Bashtard
+- FreeBSD
+- GNU+Linux
+---
+
+In the past year, I've written Bashtard, a simple configuration system written
+in Bash to minimize the required dependencies, and to have a better system to
+handle different distributions/OSs in your cluster. Especially in the past two
+months, I've done quite a bit of work on it. I've worked out how to do reusable
+playbooks, generate a usable Debian package from the Makefile, extend the
+supported platforms, and more. And now, I've finally found a library to improve
+templating functionality, [Bash Pure Template](https://github.com/husixu1/bpt).
+
+When I originally started Bashtard I had looked around for nice and simple
+templating solutions that I could use. Sadly, pretty much all the available
+results required me to add dependencies, or couldn't really do more than what I
+did using `sed` and `awk`.
+
+For a long time, I had accepted that the kind of system that I wanted didn't
+exist, and I wasn't interested in making it myself at the time. Last night,
+however, I decided to just give it a quick search to see if anything had
+changed, and BPT popped up somewhere in my results. Having a quick look through
+the documentation made me very interested, it seemed to have all the features I
+desired, while still sticking to utilities I've already accepted for Bashtard.
+
+With one small exception, `md5sum`. This utility is not available on the FreeBSD
+systems I maintain. On FreeBSD, this tool is called `md5`, and has different
+options it can use. On the bright side, both `md5sum` and `md5` accept the
+content to be hashed on `STDIN`, and will write the hash to `STDOUT`.
+Additionally, Bashtard already contains logic to deduce what kind of system it
+is running on.
+
+And so I decided it's worth a try. There are only 5 references to `md5sum`, and
+they all happen in the same function, `bpt.fingerprint`. I've added an extra
+variable, `util`, and a `case...esac` to set this variable.
+
+```bash
+local util
+
+case "${BASHTARD_PLATFORM[key]}" in
+ freebsd) util=md5 ;;
+ linux-*) util=md5sum ;;
+ *)
+ debug "bpt/fingerprint" "Falling back to md5sum for hashing"
+ util=md5sum
+ ;;
+esac
+```
+
+After that, just replace all the `md5sum` invocations with `"$util"`. And a
+quick test later, it seems to function just fine. Implementing BPT as a library
+was incredibly straightforward too.
+
+```bash
+. "$BASHTARD_LIBDIR/vendor/bpt.bash"
+
+file_template_bpt()
+{
+ local file
+
+ file="$1" ; shift
+
+ eval "$* bpt.main ge \"$file\""
+}
+```
+
+The `eval` is a bit icky, but it saves me from polluting the environment
+variables through various `export`s.
+
+Another small adjustment I've made to BPT is the shebang. Upstream uses
+`#!/bin/bash`, but this is incorrect on some systems, including FreeBSD. The
+Bashtard version uses `#!/usr/bin/env bash` instead. Additionally, the upstream
+repository uses `.sh` as the file extension, which I've updated to be `.bash` to
+more accurately reflect which shell it is used with. Upstream also uses a
+4-space indent, which I've left as-is for now, since indentation is more of a
+personal choice, even if that choice is wrong. Finally, I added 3 `shellcheck
+disable` rules to make shellcheck happy.
+
+After some playbook testing on my own systems, I can say that BPT works pretty
+well so far, and I'm very glad the author made it available as free software.
+Thanks!
diff --git a/content/posts/2023/2023-05-23-bashtard-2.0.0.md b/content/posts/2023/2023-05-23-bashtard-2.0.0.md
new file mode 100644
index 0000000..654435f
--- /dev/null
+++ b/content/posts/2023/2023-05-23-bashtard-2.0.0.md
@@ -0,0 +1,110 @@
+---
+date: 2023-05-23
+title: Bashtard v2.0.0
+tags:
+- Bash
+- Bashtard
+- FreeBSD
+- GNU+Linux
+---
+
+A little over a year ago I started on a project to create my own configuration
+management system. I've been disappointed with existing alternatives, such as
+Ansible, on the grounds that they don't work all that well if you have a mix of
+different distros with different package managers, and sometimes even different
+paths to store data in.
+
+I've been having a lot of fun working on it, since the limitations I've put on
+it result in having to solve some problems in different ways than I would in a
+full-fledged programming language. These limitations also keep things pretty
+simple, and ensure that most of the features I have worked on need little to no
+additional effort to run on all the different systems I use for my computing
+needs.
+
+And now, a year later, I feel confident enough about a new release. There's some
+small backwards incompatible changes, so a new major release version is the way
+to go. [Bashtard v2.0.0](https://www.tyil.nl/projects/bashtard/releases/2.0.0/)
+is now available. There are a few big things that I want to go into a little
+bit, but you can also find a full list of changes in the changelog included on
+the release page.
+
+# Templating
+
+After using the templating features I [wrote about]() last month, I've decided
+to _not_ include it in Bashtard. After using it in practice, I am not convinced
+that it adds enough value to warrant the size of the added code, and the hassle
+of two licenses instead of one. I am still very much open to the idea of a
+good base templating engine, but for now you can always install `jinja2` or
+something on the target machine, and call that manually. The new
+`playbook_path()` function should make it easy to generate the path to your
+playbook's files.
+
+# Additional `$BASHTARD_*` vars
+
+Apart from having a new key in `$BASHTARD_PLATFORM` called `init`, there's a
+completely new variable in this version: `$BASHTARD_PLAYBOOK_VARS`. Currently,
+it's only used to set a given variable as required, but can be extended in the
+future with other kinds of checks. This allows playbooks to define some data to
+be required for it to run, and have it refuse to run if those are not supplied,
+rather than having to manually check them when the playbook runs. This is mainly
+intended for use with playbooks you intend to share, so that other people can
+get reasonable feedback as to what they _need_ to configure, vs what they _can_
+configure.
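+
+Purely as an illustration, a playbook could mark a setting as required along
+the following lines. The exact key and value syntax here is my own guess, not
+taken from the Bashtard documentation.
+
+```bash
+# hypothetical sketch; check the Bashtard docs for the real syntax
+BASHTARD_PLAYBOOK_VARS[vpn.hostname]=required
+```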
+
+# Re-usable playbooks
+
+So let's talk about one of the more important updates to Bashtard. At least, in
+my opinion. How playbooks are used has been altered slightly, in order to make
+re-using them a little easier. I consider the ability to easily share your
+playbooks with others, and to use other people's playbooks with minimal
+effort, a very important feature of any configuration management system. This
+greatly reduces the barrier to get started, and encourages people
+to show off what they've made.
+
+The current implementation is built upon git submodules, and the `bashtard pull`
+command will take them into account. Perhaps I'll add an `import` subcommand in
+the future to abstract the git submodule effort away, as I know that many people
+find it difficult to work with. However, since `git` is already ingrained in
+Bashtard, this addition keeps dependencies low, and allows me to keep the
+complexity out of the Bash code.
+
+# data.d
+
+Having re-usable playbooks introduced the need to have a place for data that is
+important to my setup, but completely useless to someone else's setup. For this,
+the `data.d` directory was added. You can store information that should be
+preserved across sync runs on your machines, but is not a good fit to keep in
+the actual playbook itself. I personally use it for my
+[`vpn-tinc`](https://git.tyil.nl/bashtard/vpn-tinc/) playbook to keep the host
+files in.
+
+Another use-case for this directory involves no playbook at all. You can put a
+regular directory in it, and symlink to it from a host system to keep a given
+directory in sync across all your machines. In my case, I have an `etc-nixos`
+directory in my `data.d` directory. On my NixOS system I have a symlink from
+`/etc/nixos` to `/etc/bashtard/data.d/nixos`. If I ever continue with NixOS, I
+can have this on all systems, and share any `.nix` files across all machines.
+
+# Binary packages!
+
+Lastly, I've [written
+about](https://www.tyil.nl/post/2023/03/08/using-laminar-for-self-hosted-ci/)
+Laminar before. I'm still using it, and I'm still very happy with its
+simplicity. Since setting it up I've added jobs to verify my Bashtard code with
+`shellcheck`, and if it passes, it'll queue up additional jobs to create a
+`.tar.gz` distribution and a `.deb` distribution. I hope to expand this to also
+generate binaries for use with Alpine, FreeBSD, and Archlinux.
+
+Additionally, I've recently set up an S3-compatible object store, which Laminar
+should push such artifacts to immediately. This will simplify new releases of
+any software, and offload this kind of storage to an actual remote server,
+rather than hosting `dist.tyil.nl` directly from my desktop.
+
+# Wrapping up
+
+All in all, I've been very happy with Bashtard so far, and I've been having a
+_lot_ of fun working on it. I hope to be able to continue working on it and
+making it even better than it is in this release.
+
+Thanks for reading, and perhaps even using Bashtard!
diff --git a/content/posts/2023/2023-07-13-getting-emoji-to-work-in-kde-on-debian.md b/content/posts/2023/2023-07-13-getting-emoji-to-work-in-kde-on-debian.md
new file mode 100644
index 0000000..a5b0980
--- /dev/null
+++ b/content/posts/2023/2023-07-13-getting-emoji-to-work-in-kde-on-debian.md
@@ -0,0 +1,138 @@
+---
+date: 2023-07-13
+title: Getting Emoji to Work in KDE on Debian
+tags:
+- Debian
+- GNU+Linux
+- KDE
+---
+
+This is going to be a relatively short and uninteresting post for most; it'll
+just document how to get emoji to work in KDE.
+
+While it will work with most applications out of the box, this doesn't appear to
+work in Qt applications by default, including the notification panel. As I use
+my notifications for messages I get from my work chat, and I dislike seeing the
+squares, I set out to find the solution. I've had to string together a couple
+sources of information to get to the correct setup, and this blog post intends
+to show just the useful bits. So here goes!
+
+You'll need an emoji font (in my case `fonts-noto-color-emoji`), add two
+configuration files for fontconfig, rebuild the fontconfig cache, and most
+likely log out and back into KDE. Installing the emoji font is probably the easy
+bit and won't need any additional explanation I hope. So let's get started on
+the first configuration file, which will enable the Noto emoji font to be used,
+and also force it to be used in favour of other emoji fonts if any application
+was using that specifically. I have it saved as
+`/etc/fonts/conf.d/75-noto-color-emoji.conf`.
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
+<fontconfig>
+ <!-- Add generic family. -->
+ <match target="pattern">
+ <test qual="any" name="family"><string>emoji</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+
+ <!-- This adds Noto Color Emoji as a final fallback font for the default font families. -->
+ <match target="pattern">
+ <test name="family"><string>sans</string></test>
+ <edit name="family" mode="append"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test name="family"><string>serif</string></test>
+ <edit name="family" mode="append"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test name="family"><string>sans-serif</string></test>
+ <edit name="family" mode="append"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test name="family"><string>monospace</string></test>
+ <edit name="family" mode="append"><string>Noto Color Emoji</string></edit>
+ </match>
+
+ <!-- Block Symbola from the list of fallback fonts. -->
+ <selectfont>
+ <rejectfont>
+ <pattern>
+ <patelt name="family">
+ <string>Symbola</string>
+ </patelt>
+ </pattern>
+ </rejectfont>
+ </selectfont>
+
+ <!-- Use Noto Color Emoji when other popular fonts are being specifically requested. -->
+ <match target="pattern">
+ <test qual="any" name="family"><string>Apple Color Emoji</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Segoe UI Emoji</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Segoe UI Symbol</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Android Emoji</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Twitter Color Emoji</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Twemoji</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Twemoji Mozilla</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>TwemojiMozilla</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>EmojiTwo</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Emoji Two</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>EmojiSymbols</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+ <match target="pattern">
+ <test qual="any" name="family"><string>Symbola</string></test>
+ <edit name="family" mode="assign" binding="same"><string>Noto Color Emoji</string></edit>
+ </match>
+</fontconfig>
+```
+
+The second configuration file, saved as `/etc/fonts/conf.d/local.conf`, simply
+adds the Noto emoji font as a fallback. This enables the use of it when an emoji
+is going to be rendered.
+
+```xml
+<?xml version='1.0'?>
+<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
+<fontconfig>
+ <match target="pattern">
+ <edit name="family" mode="append">
+ <string>Noto Color Emoji</string>
+ </edit>
+ </match>
+</fontconfig>
+```
+
+And after this, a relog of your (graphical) session should be all that is needed
+in order to make it work. You can easily test it with `notify-send`, or trying
+to render some emoji in `konsole`.
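+
+For reference, the whole procedure on Debian boils down to the following; the
+emoji in the test message are just an arbitrary sample.
+
+```sh
+apt install fonts-noto-color-emoji
+# place the two fontconfig files shown above, then rebuild the cache
+fc-cache -f
+notify-send "emoji test" "🎉 🚀 ❤️"
+```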
diff --git a/content/posts/2023/2023-07-24-new-server-rack-mieshu.md b/content/posts/2023/2023-07-24-new-server-rack-mieshu.md
new file mode 100644
index 0000000..f024784
--- /dev/null
+++ b/content/posts/2023/2023-07-24-new-server-rack-mieshu.md
@@ -0,0 +1,89 @@
+---
+date: 2023-07-23
+title: "My New Server Rack: Mieshu"
+tags:
+- GNU+Linux
+- Gentoo
+- Systemd
+- Garage
+---
+
+After saving up for a long while and thinking about what I want in my new home,
+I have finally taken the leap and gotten myself a server rack for home use. It
+has a 15U capacity, which should be plenty to get started, but this same brand
+has larger racks too, in case I do want to upgrade it and keep the same style.
+
+That said, for now there are only two 4U units in it, one for (file) storage,
+and one for database purposes. I sadly don't have anything dedicated for
+workloads yet, so for now, both of these servers are intended to also run some
+light workloads. I haven't made my mind up yet on how to solve the workload
+issues. Now that I have a rack, I obviously want something rack-mountable, and
+I probably want it to run a Kubernetes cluster too.
+
+In this regard, I _could_ go for a set of [Raspberry Pi](https://www.raspberrypi.com/)
+units; there are [3U mounts that can hold up to 12 Raspberry Pi machines](https://www.uctronics.com/uctronics-19-inch-3u-rack-mount-for-raspberry-pi-4-with-8-mounting-plates.html),
+which would be a nice amount. However, I am not yet completely sold on full-ARM
+workloads, and I'm not entirely convinced of the power of Raspberry Pi units in
+general. I'd much rather standardize on another brand, [Odroid](https://www.hardkernel.com/),
+as they have more types of units available, and are not limited to just ARM. But
+since they're not the popular kid in class, there's very little off-the-shelf
+rack-mounting equipment for them. I'll be thinking about this for just a bit more
+before making a decision.
+
+For now, though, I wanted to talk about the setup of the first server, Mieshu,
+who will be used as a storage server. Mieshu currently runs 8 HDDs, and 2 NVMe
+drives. One of the NVMe drives is used for the rootfs, and the other is used for
+caching in certain applications. The HDDs themselves offer the data storage
+capacity.
+
+The HDDs currently consist of four 16TB drives, and four 8TB drives. The
+smaller disks come from my desktop, Edephas, which used to serve as data storage
+until Mieshu took over. All disks are configured into pairs, which themselves
+make mirrors. This means I have four sets of mirror pools, two times 16TB, and
+two times 8TB, for a total of 48TB of storage. I'm currently using about half of
+this, and it should give me plenty of time before needing to increase the size
+again.
+
+I chose to use mirrors since they offer a good chance of data being recoverable
+on disk failure, and they let me buy disks two at a time, rather than in larger
+numbers. This hopefully keeps the cost of expansion within reasonable limits.
+The mirrors themselves are currently [ZFS](https://openzfs.org/wiki/Main_Page)
+pools, but I hope to be able to use [bcachefs](https://bcachefs.org/) very soon
+as well.
+
+Just a bunch of mirrors is rather inconvenient, however, so I'm also leveraging
+[MergerFS](https://github.com/trapexit/mergerfs) to combine all the mirrors into
+a single usable pool. This slightly odd setup was chosen over RAID-0 or RAID-Z*
+to lower the impact of disk failure. Even if two disks in the same mirror were
+to die at the same time, I wouldn't lose _all_ data, just the bits on that
+particular mirror. It would be very annoying, but it wouldn't be disastrous.
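+
+As a sketch of how that is wired together, MergerFS only needs a single fstab
+entry listing the mirrors' mountpoints; the mountpoints and option choices
+below are assumptions on my part, not a prescription.
+
+```conf
+# /etc/fstab (hypothetical mountpoints)
+/mnt/mirror0:/mnt/mirror1:/mnt/mirror2:/mnt/mirror3  /srv/storage  fuse.mergerfs  allow_other,category.create=mfs  0 0
+```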
+
+Apart from generic mass storage, I also host S3 buckets for personal use. This
+is where I upload CI artifacts to, and [my MissKey instance](https://fedi.tyil.nl/@tyil)
+uses it for storing objects as well. Future services such as [Mimir](https://grafana.com/oss/mimir/)
+will probably leverage S3 for storage as well. This is achieved through
+[Garage](https://garagehq.deuxfleurs.fr/). I've also tried [SeaweedFS](https://seaweedfs.github.io/),
+which is a very neat project on its own, but Garage is just simpler to
+configure, and allows a replicated setup with only two servers, whereas SeaweedFS
+demands an odd number of master servers.
+
+And lastly, Mieshu runs [K3s](https://k3s.io/) for its Kubernetes component. It
+is currently not serving anything yet, as the other server is supposed to become
+the database server, which is needed for most workloads. Once that is up and
+running, Mieshu will start hosting things such as
+[Grafana](https://grafana.com/oss/grafana) and [Loki](https://grafana.com/oss/loki/),
+monitoring stuff basically. Perhaps I'll move [Laminar](https://laminar.ohwg.net/)
+to this server as well, but I'm unsure if I will run that as a Kubernetes service.
+
+The server itself runs on Gentoo, as it still is the most stable experience I
+can get out of any GNU+Linux distribution. I am, however, not using the default
+of OpenRC as the init system and service manager. For the first time, I'm
+running Gentoo with systemd. After several years, it appears to have become
+stable enough to trust with serious workloads. With its increased use, however,
+some things have become simpler by just using systemd. I hope to get a better
+understanding of it, and learn to bend it to my will as needed, by simply using
+it on my own systems.
+
+I hope to have time to work on the other server sooner rather than later, so
+I can finish up the base of my new setup. Be on the lookout for the next post,
+where I'll go into detail on Nouki, the database server.
diff --git a/content/posts/2023/2023-08-05-new-server-rack-nouki.md b/content/posts/2023/2023-08-05-new-server-rack-nouki.md
new file mode 100644
index 0000000..fbbc326
--- /dev/null
+++ b/content/posts/2023/2023-08-05-new-server-rack-nouki.md
@@ -0,0 +1,88 @@
+---
+date: 2023-08-05
+title: "My New Server Rack: Nouki"
+tags:
+- GNU+Linux
+- Gentoo
+- PostgreSQL
+- Prometheus
+- Systemd
+- ZFS
+---
+
+After setting up [mieshu](/post/2023/07/23/my-new-server-rack-mieshu/), nouki is
+the next server to work on in my home rack. Nouki is intended to live as my main
+database server, mainly for PostgreSQL, but perhaps later in its life also
+MySQL, if I ever want to run a service that doesn't support superior databases.
+
+The setup for nouki is much simpler in that regard; the base system is almost
+identical. This server has ZFS with 2 NVMe disks running in a mirror
+configuration. It is also a Gentoo based system, and again with systemd rather
+than openrc. The experience of systemd with mieshu was much less painful than I
+anticipated. It would seem that it has had time to mature, though I still
+dislike how it kills diversity in init/service managers on GNU+Linux.
+
+Both PostgreSQL and ZFS have received some tweaking to run more smoothly. I'm no
+DBA, so if you see anything silly in here, do let me know so I can improve my
+life.
+
+For ZFS, tweaking was rather minimal. I've made a separate dataset for
+PostgreSQL to use, with `recordsize=8K` set as an option. For PostgreSQL, I've altered
+a bit more. First and foremost, the `pg_hba.conf` to allow access from machines
+in my tinc-based VPN.
+
+```conf
+host all all 10.57.0.0/16 scram-sha-256
+```
+
+The `postgresql.conf` file received the following treatment, based solely on the
+guidance provided by [PGTune](https://pgtune.leopard.in.ua/).
+
+```conf
+listen_addresses = '10.57.101.20'
+max_connections = 200
+shared_buffers = 8GB
+effective_cache_size = 24GB
+maintenance_work_mem = 2GB
+checkpoint_completion_target = 0.9
+wal_buffers = 16MB
+default_statistics_target = 100
+random_page_cost = 1.1
+effective_io_concurrency = 200
+work_mem = 5242kB
+min_wal_size = 1GB
+max_wal_size = 4GB
+max_worker_processes = 12
+max_parallel_workers_per_gather = 4
+max_parallel_workers = 12
+max_parallel_maintenance_workers = 4
+```
+
+With this, PostgreSQL seems to perform very well on this machine, applications
+using it are noticeably faster. Sadly, I have no timings from when it all ran on
+my desktop, so I cannot make an exact statement on how much faster everything
+has become.
+
+Additionally, I wanted to start gathering metrics of my machines and services,
+so I can start thinking about dashboards and alerts. I've chosen to use the
+current industry standard of Prometheus for this. Since I consider Prometheus to
+be a database for metrics, it has been deployed on my database server as well.
+
+Prometheus is currently set to scrape metrics from the `node_exporter` and
+`postgresql_exporter`, and seems to work fine. I expect I may need to tweak it
+in the future to configure how long I want metrics to be available, since I've
+seen it use quite a lot of memory when storing a large amount of
+metrics for a very long time.
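+
+For reference, the scrape configuration for those two exporters can be as
+small as the snippet below; the address is the database server's VPN address
+used above, and the ports are simply the exporters' defaults.
+
+```yaml
+# prometheus.yml (sketch)
+scrape_configs:
+  - job_name: node
+    static_configs:
+      - targets: ['10.57.101.20:9100']
+  - job_name: postgresql
+    static_configs:
+      - targets: ['10.57.101.20:9187']
+```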
+
+To actually see the metrics and have alerts, I currently intend to go with
+Grafana. I already have ntfy running, and it appears relatively simple to mold
+Grafana alerts into ntfy notifications. To do this properly, I will require some
+machines to handle regular workloads. Most likely these will be Intel NUCs, or
+similar machines, as they draw very little power for reasonable performance.
+Raspberry Pi units would be cheaper, but also seem vastly less powerful, and I'd
+need to ensure all my intended workloads can run on ARM which could become a
+nuisance very quickly.
+
+As I already have an Intel NUC to play with, that's what I'll be doing for the
+coming few days to see if this can work for my desires. Perhaps I can try out a
+highly available cluster setup of K3s in the near future!
diff --git a/content/posts/2023/2023-08-29-releasing-raku-modules-with-fez.md b/content/posts/2023/2023-08-29-releasing-raku-modules-with-fez.md
new file mode 100644
index 0000000..edc1914
--- /dev/null
+++ b/content/posts/2023/2023-08-29-releasing-raku-modules-with-fez.md
@@ -0,0 +1,74 @@
+---
+date: 2023-08-29
+title: "Releasing Raku modules with fez"
+tags:
+- Argo
+- Raku
+---
+
+Last week I got a message on Matrix, asking me to update one of my
+[Raku](https://raku.org/) modules,
+[`Config::Parser::TOML`](https://git.tyil.nl/raku/config-parser-toml/). One of
+the dependencies had been updated, and the old one is no longer available
+through the module installer `zef`. It's not that big a change, and there are
+tests available, so it's a reasonably small fix in itself.
+
+Recently I've set up [Argo Workflows](https://argoproj.github.io/workflows/) for
+my CI/CD desires, and I found this a good and simple Raku project to try and
+incorporate into a workflow. Since I had some additional quality checks ready to
+use in my workflow, this has resulted in [REUSE](https://reuse.software/)
+compliance for this Raku module, in addition to the regular `prove` tests
+already available in the project. Additionally, the de facto default module
+authoring tool `fez` also brings a few new checks that have been incorporated.
+
+While all that is good, there were some annoyances I encountered while
+configuring this. Notably, I've found `fez` to be a chore to work with when it
+comes to non-interactive use. All CI/CD jobs run in their own Kubernetes pods,
+and _should_ not require any interaction from myself during these runs. I am
+writing this blog post mainly to write down the annoyances I encountered, hoping
+that `fez` can be improved in the future.
+
+Let's start with the first issue I encountered while setting up the workflow:
+`zef install fez` fails by default. `zef` gives the advice to `--exclude` one of
+the dependencies, and going by the issues reported on their Github repository,
+this seems to be the accepted workaround. However, I'd argue that this workaround
+should not be needed to begin with. Especially seeing as `fez` works fine and I
+have absolutely no clue what this `z` is or how I can supply it. Either drop
+this dependency, or document its use and upstream so people can package it.
+
+The second issue I encountered was with the `login` functionality of `fez`.
+There seems to be no way to handle this non-interactively. The way around this
+for me has become to use `expect` scripts, but this is obviously not very pretty
+and will break whenever the interactive interface of `fez` changes. A good means
+of non-interactive authentication would be great to have. I've considered
+just mounting `fez`'s config/cache into the containers, but the documentation warns
+that tokens aren't permanent to begin with.
+
+Next up there's the actual `upload` command. I'm running it twice in my
+workflow, once with `--dry-run` and once with `--force`. The first one is done
+as a preliminary quality check to see if there are any obvious issues that ought
+to be fixed beforehand. I noticed on a subsequent run (the one with `--force`)
+that the _dry_ run isn't all that dry. It leaves an `sdist` directory, which in
+turn will get included in the next step. There's a flag to create this `sdist`
+directory, but no flag to do the inverse. My solution is to end this step with
+`rm -fr -- sdist` to clean it up again.
+
+And lastly, when all quality assurance checks have passed, the `fez upload
+--force` command is run on the working directory. I'd rather not force anything
+here, but the alternative is that another interactive question pops up and the
+job hangs forever. I don't know all the possible prompts `fez` can generate, and
+for this one I didn't even bother to try and look that up. Rather than a
+`--force` to practically say "yes" to everything, I'd prefer an option to say
+"no" to everything, failing the pipeline immediately.
+
+Another pet-peeve of mine is that `fez` seemingly doesn't use exit codes. No
+matter what happens, even something quite important such as `login` with
+incorrect credentials, it _always_ returns `0` as exit code. This should
+obviously be fixed sooner rather than later, as it is a simple fix, and checking
+the exit code is how _many_ systems deduce that something went wrong.
+
+Uploads of module updates are currently working, which is good, but I feel like
+a lot of workaround code I had to write should not be necessary. If `fez` can
+fix these issues, it will be much more of a breeze to use, which in turn
+hopefully encourages more automated testing and distributing of Raku modules.
+This can be a great boon for the module ecosystem and overall community.
diff --git a/content/posts/2023/_index.md b/content/posts/2023/_index.md
new file mode 100644
index 0000000..adf7d34
--- /dev/null
+++ b/content/posts/2023/_index.md
@@ -0,0 +1,3 @@
+---
+title: 2023
+---
diff --git a/content/projects/_index.md b/content/projects/_index.md
new file mode 100644
index 0000000..6907ad3
--- /dev/null
+++ b/content/projects/_index.md
@@ -0,0 +1,7 @@
+---
+title: Projects
+---
+
+This page lists all projects I actively work on, with some information about
+them, releases, any packages, and documentation to get people started on using
+them.
diff --git a/content/projects/bashtard/_index.md b/content/projects/bashtard/_index.md
new file mode 100644
index 0000000..9e31798
--- /dev/null
+++ b/content/projects/bashtard/_index.md
@@ -0,0 +1,17 @@
+---
+title: Bashtard
+repository: https://git.tyil.nl/bashtard
+languages:
+- Bash
+---
+
+Bashtard is a configuration management system built on the idea of simplicity
+for the user. It lets you write reasonably simple Bash scripts to configure
+your systems, while providing just enough abstractions to make it easy to
+work with various base systems.
+
+It is similar in purpose to other configuration management tools, such as
+Ansible and Puppet; however, Bashtard tries to keep dependencies to a minimum
+while still providing some abstractions to make the process easier. This
+allows Bashtard to run in more constrained environments, with the abstractions
+allowing it to manage a varied array of systems in a single network.
diff --git a/content/projects/bashtard/releases/1.0.0.md b/content/projects/bashtard/releases/1.0.0.md
new file mode 100644
index 0000000..95b133d
--- /dev/null
+++ b/content/projects/bashtard/releases/1.0.0.md
@@ -0,0 +1,9 @@
+---
+title: Bashtard v1.0.0
+date: 2022-05-06
+type: project-release
+packages:
+ bashtard-1.0.0.tar.gz: https://dist.s3.tyil.nl/packages/bashtard/bashtard-1.0.0.tar.gz
+---
+
+This is the initial release of Bashtard.
diff --git a/content/projects/bashtard/releases/2.0.0.md b/content/projects/bashtard/releases/2.0.0.md
new file mode 100644
index 0000000..053637f
--- /dev/null
+++ b/content/projects/bashtard/releases/2.0.0.md
@@ -0,0 +1,68 @@
+---
+title: Bashtard v2.0.0
+date: 2023-05-22
+type: project-release
+packages:
+ bashtard-2.0.0.deb: https://dist.s3.tyil.nl/bashtard/bashtard-2.0.0/bashtard-2.0.0.deb
+ bashtard-2.0.0.tar.gz: https://dist.s3.tyil.nl/bashtard/bashtard-2.0.0/bashtard-2.0.0.tar.gz
+---
+
+### Added
+
+- The `var` subcommand is now referenced in `usage()`.
+- A `pkg` subcommand has been added, to allow for direct interaction with the
+ `pkg_*()` utilities provided by Bashtard.
+- `config_subkeys()` and `config_subkeys_for()` have been added, to look up
+ subkeys defined in config files. These can help when you want to use a list
+ somewhere in your configuration.
+- A `backup` subcommand has been added. This backup system uses borg, which must
+ be installed, but should be generic enough to be usable by most people out of
+ the box.
+- The `Makefile` has been extended with targets for creating packages for
+ GNU+Linux distributions.
+- The `$BASHTARD_PLATFORM` variable now contains an additional entry, `init`, to
+ allow for handling different init systems on GNU+Linux in a cleaner fashion.
+- A `file_hash` utility function has been added. It currently uses `md5`, but is
+ written in such a fashion that this can easily be updated for the future. Its
+ intent is to encapsulate differences between naming and usage of hashing
+ utilities found on different systems.
+- A `dir_hash` utility function has been added, which will give you a hash based
+ on the file contents of a directory. This function will find files
+ recursively, calculate a hash for each of them, and then calculate a hash
+ based on the total result. The intended goal is to allow running before and
+ after templating some files, to deduce whether something actually changed.
+- A `diff` subcommand has been added to show all non-committed changes. It is a
+ convenience wrapper to avoid having to change directory and run `git diff` to
+ get an overview of all pending changes.
+- A `pull` subcommand has been added to only pull the latest changes into the
+ `$BASHTARD_ETCDIR`, without running `sync` on all the playbooks.
+- A new global variable, `$BASHTARD_PLAYBOOK_VARS` has been added. Currently,
+ its only purpose is to check for "required" variables to be used in the
+ playbook. Before an `add` or `sync`, any variables declared to be `required`
+ in the `$BASHTARD_PLAYBOOK_VARS` array will be checked to be non-empty. If any
+  are empty, an error will be thrown and the playbook will not be run.
+- A new directory has been added, `data.d`, for data that should be shared
+ between playbook runs. This new directory is intended to create a clearer
+  separation between a playbook and a user's specific data used by the playbook,
+ which in turn should make re-using playbooks easier.
+- A convenience function has been introduced, `playbook_path()`, which can give
+ you the absolute path to the playbook's base or data directory.
+- A `top` subcommand has been added to give some generic information about all
+  nodes known to Bashtard. It uses information from the `sysinfo` subcommand,
+  which it will pull in through an `ssh` invocation.
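+
+The following is a rough, hypothetical sketch of the `dir_hash` approach
+described above, written as a plain shell function. It is not the actual
+Bashtard implementation: it calls `md5sum` (GNU coreutils) directly, whereas the
+real helper is described as wrapping `file_hash` to cope with platform
+differences.
+
+```sh
+# Sketch only: checksum every file under the given directory, sort the
+# resulting lines so the output is deterministic, then hash that combined
+# list to get a single value for the whole tree.
+dir_hash_sketch() {
+    find "$1" -type f -exec md5sum {} + \
+        | sort \
+        | md5sum \
+        | cut -d ' ' -f 1
+}
+
+# Example use: detect whether templating changed anything in a directory.
+dir=/path/to/some/playbook/data  # hypothetical path, adjust as needed
+before=$(dir_hash_sketch "$dir")
+# ... template some files inside "$dir" here ...
+after=$(dir_hash_sketch "$dir")
+[ "$before" = "$after" ] || echo "files changed"
+```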
+
+### Changed
+
+- The `ssh` subcommand's configuration has been nested under `bashtard`, e.g.
+  `ssh.host` is now `bashtard.ssh.host`. It should now also correctly use this
+  value when establishing the SSH connection.
+- `svc_enable()` now checks for the `rc.d` file to exist before running `grep`
+ on it.
+- `pkg_*()` functions no longer _require_ a `pkg.*` value to be defined. If one
+ is not set explicitly, a warning will be generated, but the original name
+ passed to the `pkg_*()` function will be used by the host's package manager.
+- `datetime()` now always passes `-u` on to `date`.
+- All manpages now include a `NAME` section.
+- The `sync` subcommand will now `stash` any changes before it attempts to
+  `pull`. Afterwards, `stash pop` will be run to apply the last `stash`ed
+  changes again. A sketch of this sequence in plain `git` commands follows
+  below.
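+
+As a rough illustration, the new `pull`-related behaviour of `sync` boils down
+to the following sequence of plain `git` commands. The actual subcommand wraps
+this in Bashtard's own logging and error handling, which is left out here.
+
+```sh
+# Approximation of what sync now does around pulling: stash local changes,
+# pull the latest configuration into $BASHTARD_ETCDIR, then re-apply the
+# stashed changes.
+cd "$BASHTARD_ETCDIR" || exit 1
+git stash
+git pull
+git stash pop
+```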
diff --git a/content/projects/bashtard/releases/_index.md b/content/projects/bashtard/releases/_index.md
new file mode 100644
index 0000000..c98ddda
--- /dev/null
+++ b/content/projects/bashtard/releases/_index.md
@@ -0,0 +1,4 @@
+---
+title: Bashtard
+type: project-release
+---
diff --git a/content/recipes/condiments/applesauce.md b/content/recipes/condiments/applesauce.md
index 5275c3b..2b0603e 100644
--- a/content/recipes/condiments/applesauce.md
+++ b/content/recipes/condiments/applesauce.md
@@ -13,7 +13,7 @@ ingredients:
amount: 5
- label: Water
unit: liter
- amount: 100
+ amount: 1
- label: Cinnamon
stages:
diff --git a/content/recipes/condiments/mayonnaise.md b/content/recipes/condiments/mayonnaise.md
new file mode 100644
index 0000000..ec17290
--- /dev/null
+++ b/content/recipes/condiments/mayonnaise.md
@@ -0,0 +1,40 @@
+---
+title: Mayonnaise
+date: 2022-11-20
+preptime: 5
+cooktime: 10
+serves: 5
+tags:
+- condiment
+- vegetarian
+
+ingredients:
+- label: Egg
+ amount: 3
+- label: Lemon Juice
+ amount: 10
+ unit: grams
+- label: Mustard
+ amount: 25
+ unit: grams
+- label: Olive Oil (Mild)
+ amount: 200
+ unit: grams
+
+stages:
+- label: Preparing
+ steps:
+  - Separate the egg whites from the yolks
+ - Put the yolks into a tall container for easy mixing with a stick blender
+ - Add the mustard to the container
+- label: Mixing
+ steps:
+ - Start mixing the ingredients in the tall container
+ - Slowly add in the oil, ensuring it all gets blended into a thick mass
+ - Continuously add in the oil until all is used
+ - Add in the lemon juice
+ - Mix for another minute or so to ensure the juice is incorporated properly
+---
+
+A simple sauce that goes well with nearly everything, though it is most popular
+with french fries.
diff --git a/content/recipes/condiments/salsa.md b/content/recipes/condiments/salsa.md
new file mode 100644
index 0000000..01b81e2
--- /dev/null
+++ b/content/recipes/condiments/salsa.md
@@ -0,0 +1,59 @@
+---
+title: Sweet and Spicy Salsa
+date: 2022-10-02
+draft: true
+tags:
+- snacks
+- sweet
+- spicy
+preptime: 15
+cooktime: 0
+serves: 4
+
+ingredients:
+- label: Bell Pepper
+ amount: 50
+ unit: grams
+- label: Black Pepper
+- label: Garlic
+ amount: 2
+ unit: cloves
+- label: Honey
+ amount: 1
+ unit: tablespoon
+- label: Jalapeno
+ amount: 25
+ unit: grams
+- label: Ketjap Manis
+ amount: 1
+ unit: tablespoon
+- label: Red Onion
+ amount: 50
+ unit: grams
+- label: Salt
+- label: Spring Onion
+ amount: 25
+ unit: grams
+- label: Tomato
+ amount: 100
+ unit: grams
+- label: Worcestershire Sauce
+ amount: 1
+ unit: tablespoon
+
+stages:
+- label: Preparations
+ steps:
+ - Chop all choppable ingredients into small bits.
+ - Combine all ingredients in a bowl.
+ - Mix around until it combines into a salsa.
+---
+
+A sweet and spicy salsa, great for parties.
+
+<!--more-->
+
+It is inspired by the cooking video from You Suck At Cooking, which teaches [the
+way of rgogsh](https://youtube.alt.tyil.nl/watch?v=HCNwSe3t8ek). This recipe has
+been refined over time into my favourite salsa, but you can easily swap a few
+ingredients around to get something that works for all sorts of parties.
diff --git a/content/recipes/condiments/sauce-mushroom.md b/content/recipes/condiments/sauce-mushroom.md
new file mode 100644
index 0000000..0865faf
--- /dev/null
+++ b/content/recipes/condiments/sauce-mushroom.md
@@ -0,0 +1,65 @@
+---
+title: Mushroom Sauce
+date: 2022-09-30
+tags:
+- condiments
+- savory
+preptime: 5
+cooktime: 25
+serves: 1
+
+ingredients:
+- label: Butter
+ amount: 50
+ unit: gram
+- label: Cream
+ amount: 150
+ unit: gram
+- label: Garlic
+ amount: 2
+ unit: clove
+- label: Mushrooms
+ amount: 100
+ unit: gram
+- label: Mustard
+ amount: 1
+ unit: teaspoon
+- label: Onion
+ amount: 1
+- label: Thyme
+ amount: 1
+ unit: teaspoon
+- label: White Wine
+ amount: 0.05
+ unit: liter
+- label: Worcestershire Sauce
+ amount: 0.02
+ unit: liter
+- label: Pepper
+- label: Salt
+
+stages:
+- label: Preparation
+ steps:
+ - Clean and cut the mushrooms.
+ - Finely dice the onions.
+ - Finely dice the garlic.
+ - Chop the thyme.
+- label: Cooking
+ steps:
+ - Melt the butter in a pan with a little bit of oil to prevent the butter from
+ burning.
+ - Add the onion and garlic and fry for about 1 minute.
+ - Add the mushrooms and fry until cooked, about 3 to 4 minutes.
+  - Add a pinch of salt.
+ - Reduce the heat.
+ - Add the white wine to deglaze the pan.
+ - Add the cream to the pan.
+ - Add the mustard to the pan.
+ - Add the worcestershire sauce to the pan.
+ - Add the thyme to the pan.
+ - Add the pepper to the pan.
+ - Stir everything together, and let simmer for 10 to 15 minutes to thicken up.
+---
+
+A savory sauce to be served warm with your dish. Works very well for steaks.
diff --git a/content/recipes/dishes-hot/soup-mushroom-cream.md b/content/recipes/dishes-hot/soup-mushroom-cream.md
new file mode 100644
index 0000000..5a0d43e
--- /dev/null
+++ b/content/recipes/dishes-hot/soup-mushroom-cream.md
@@ -0,0 +1,68 @@
+---
+draft: true
+title: Cream of Mushroom Soup
+date: 2022-10-08
+preptime: 20
+cooktime: 60
+serves: 5
+tags:
+- hot
+- meal
+- soup
+- vegetarian
+
+ingredients:
+- label: Butter
+ amount: 25
+ unit: grams
+- label: Cream (40%)
+ amount: 750
+ unit: milliliter
+- label: Mushroom
+ amount: 500
+ unit: grams
+- label: Parsley
+- label: Parmesan
+ amount: 50
+ unit: grams
+- label: Vegetable Stock
+ amount: 100
+ unit: milliliter
+- label: Onion
+ amount: 200
+ unit: grams
+- label: Shallot
+ amount: 50
+ unit: grams
+
+stages:
+- label: Preparation
+ steps:
+ - Cut the onions in half circles.
+ - Cut the shallots into half circles.
+ - Cut the mushrooms into quarter slices.
+ - Grate the parmesan.
+ - Finely chop the parsley.
+- label: Caramelizing the Onion
+ steps:
+ - Get your soup pot, and add the butter to it.
+ - Set your stove to medium-high.
+ - Let the butter melt completely.
+ - Add the onions to the pot.
+ - Add the shallots to the pot.
+ - Cook until the onions become soft, about 5 minutes.
+ - Turn your stove to low-medium heat.
+ - Continue cooking while stirring occasionally, until the onions become brown.
+- label: Soup
+ steps:
+ - Deglaze the pot with the vegetable stock.
+ - Add the mushrooms.
+ - Cook until the mushrooms turn soft, about 10 minutes.
+ - Add the cream.
+ - Add the parmesan.
+ - Add the parsley.
+ - Stir everything together, and let cook for about 5 more minutes.
+ - Add salt and pepper to taste.
+---
+
+My own take on a cream-based mushroom soup.
diff --git a/content/recipes/dishes-hot/soup-pea-halal.md b/content/recipes/dishes-hot/soup-pea-halal.md
new file mode 100644
index 0000000..231b984
--- /dev/null
+++ b/content/recipes/dishes-hot/soup-pea-halal.md
@@ -0,0 +1,86 @@
+---
+title: Halal Pea Soup
+date: 2022-10-16
+preptime: 30
+cooktime: 120
+serves: 8
+tags:
+- hot
+- meal
+- soup
+- halal
+
+ingredients:
+- label: Vegetable Broth
+ amount: 3
+ unit: liter
+- label: Split peas (dried)
+ amount: 500
+ unit: grams
+- label: Beef ribs, including bone
+ amount: 400
+ unit: grams
+- label: Lamb strips, with fat
+ amount: 300
+ unit: grams
+- label: Smoked Sausage (chicken)
+ amount: 250
+ unit: grams
+- label: Carrot
+ amount: 350
+ unit: grams
+- label: Onion
+ amount: 200
+ unit: grams
+- label: Leek
+ amount: 150
+ unit: grams
+- label: Celery
+ amount: 100
+ unit: grams
+- label: Potato
+ amount: 250
+ unit: grams
+- label: Celeriac
+ amount: 300
+ unit: grams
+- label: Salt
+- label: Black Pepper
+- label: Parsley
+
+stages:
+- label: Preparation
+ steps:
+ - Cut the smoked sausage into slices.
+ - Cut all the vegetables into small bits.
+ - Caramelize the onion.
+- label: Base
+ steps:
+ - Pour the vegetable stock in the pot.
+ - Add the split peas.
+- label: Meats
+ steps:
+ - Add the beef ribs.
+ - Add the lamb strips.
+ - Bring the entire mixture to a soft boil.
+ - Let boil for 45 minutes, stirring every 5 minutes. Skim off any scum that
+ floats to the top.
+ - Remove the meats from the pot.
+ - Debone the beef ribs.
+ - Cut the lamb strips into smaller bits.
+ - Put the meats back into the pot.
+- label: Vegetables
+ steps:
+  - Add all the vegetables.
+ - Add the parsley.
+  - Let boil for at least 1 hour, stirring every 5 to 10 minutes. The peas need
+    to have dissolved, and the soup should be a little thick.
+- label: Finishing Touches
+ steps:
+ - Add salt and pepper to taste.
+- label: Serving
+ steps:
+ - Let sit overnight, and serve the next day for optimal enjoyment.
+---
+
+A halal version of a famous Dutch winter meal, pea soup.
diff --git a/content/recipes/dishes-hot/stew-dutch.md b/content/recipes/dishes-hot/stew-dutch.md
index 6e7abdf..9806492 100644
--- a/content/recipes/dishes-hot/stew-dutch.md
+++ b/content/recipes/dishes-hot/stew-dutch.md
@@ -10,6 +10,7 @@ tags:
- beef
- hot
- meal
+- meat
ingredients:
- label: Beef
@@ -48,11 +49,15 @@ ingredients:
- label: Appelstroop
amount: 2
unit: tablespoons
+ links:
+ - https://nl.wikipedia.org/wiki/Appelstroop
- label: Mustard
amount: 3
unit: teaspoons
- label: Bay leaf
amount: 4
+- label: Pepper
+- label: Salt
- label: Smoked paprika
- label: Thyme
@@ -71,7 +76,7 @@ stages:
steps:
- Saute the onions in the pot
- Add in the garlic, cook for about half a minute
- - Add the leek to the pot, and eook for a minutes
+ - Add the leek to the pot, and cook for a minute
- Add the carrot to the pot
- Add the beer and beef stock to the pot
- Add the bay leaves to the pot
@@ -82,7 +87,8 @@ stages:
- Let this stew for about 6 hours, occasionally checking in to make sure its
simmering slowly. If too much liquid evaporates, you can add more water or
beef stock, the solids should be completely submerged
- - Add smoked paprika and thyme to reach a flavour you're comfortable with
+ - Add the spices (pepper, salt, smoked paprika, thyme) to reach your desired
+ flavour
- Add the potatoes, chickpeas, and beans to the pot
- Let it stew for another 90 - 120 minutes
---
diff --git a/content/recipes/dishes-side/salad-stewed-beaf.md b/content/recipes/dishes-side/salad-stewed-beaf.md
new file mode 100644
index 0000000..c7e6d15
--- /dev/null
+++ b/content/recipes/dishes-side/salad-stewed-beaf.md
@@ -0,0 +1,133 @@
+---
+title: Stewed Beef Salad
+date: 2022-11-20
+preptime: 30
+cooktime: 300
+serves: 14
+tags:
+- Dutch
+- cold
+- meat
+- beef
+- salad
+
+ingredients:
+- label: Stewed Beef
+ amount: 500
+ unit: grams
+- label: Mayonnaise
+ amount: 400
+ unit: grams
+ links:
+ - /recipes/condiments/mayonnaise/
+- label: Pickle
+ amount: 150
+ unit: grams
+- label: Potato
+ amount: 250
+ unit: grams
+- label: Carrot
+ amount: 150
+ unit: grams
+- label: Red Onion
+ amount: 150
+ unit: grams
+- label: Spring Onion
+ amount: 150
+ unit: grams
+- label: Capers
+ amount: 150
+ unit: grams
+- label: Egg
+ amount: 7
+- label: Paprika
+- label: Salt
+- label: Pepper
+- label: Garlic Powder
+
+stages:
+- label: Stewing
+ notes: |
+ This is a very simple means of stewing beef. You can adapt this to your
+ preferred recipe for stewed beef and use it all the same. Since this is the
+ longest process, you can perform all other steps in the meantime.
+ steps:
+ - Cut the beef into bite-sized cubes
+ - Sear the cubes of beef on all sides
+ - Put the seared beef in a pot
+ - Fill the pot with stock until all the beef is covered
+ - Add bay leaves to the pot
+ - Add apple syrup to the pot
+ - Add paprika to the pot
+ - Let the beef stew for about 4 hours
+- label: Chopping
+ notes: |
+    All the ingredients should be chopped to roughly the same size, around 2
+    millimeters. The finer you chop, the smoother the eventual salad will
+    be.
+ steps:
+ - Chop the pickle
+ - Chop the red onion
+ - Chop the spring onion
+ - Chop the carrot
+ - Chop the potatoes
+- label: Cooking
+ notes: |
+ The cooking process removes the raw taste, and makes the ingredients
+ slightly softer. Depending on how finely you chopped the ingredients, this
+ process only has to take 1 or 2 minutes per ingredient.
+ steps:
+ - Bring a pot of water to a boil
+ - Put in the chopped carrot
+ - Boil until _just_ ready
+ - Remove the carrot from the pot
+ - Rinse the carrot in cold water until the carrot is completely cooled off
+ - Repeat the cooking steps for the potatoes
+ - Boil the eggs for about 9 minutes
+- label: Drying
+ notes: |
+    All the ingredients should be reasonably dry before mixing them together,
+    or the salad will get watery and soggy. The method I use for drying all
+    these ingredients is to put them between sheets of paper towel, press
+    down to squeeze out most of the moisture, and then remove the paper towels.
+ steps:
+ - Dry the pickles
+ - Dry the red onion
+ - Dry the spring onion
+ - Dry the carrots
+ - Dry the potatoes
+ - Dry the capers
+- label: Combining
+ steps:
+ - Shred the stewed beef
+ - Grab a big bowl
+ - Add the pickles
+ - Add the red onion
+ - Add the spring onion
+ - Add the carrots
+ - Add the potatoes
+ - Add the capers
+ - Add the shredded beef
+ - Add the mayonnaise
+ - Mix together until combined into a cohesive salad
+ - Add salt, pepper, paprika, and garlic powder to taste
+- label: Serving
+ notes: |
+ You can obviously serve it in any way you desire, but this is how I
+ traditionally encountered it.
+ steps:
+ - Cut the boiled eggs in half
+ - Place the salad on a plate
+ - Add a boiled egg on top, cut side up
+ - Garnish with leftover pickle and spring onion
+---
+
+A small, hearty salad. Served cold, usually as a side-dish, but also works great
+as a little snack.
+
+<!--more-->
+
+If you use home-made mayonnaise, you can cook the egg whites in a scrambled
+fashion and add them to the salad as well. This won't affect the flavour too
+much, but will make it a more filling snack, and you won't have to make meringue
+_again_.
diff --git a/content/recipes/dishes-side/stewed-pears.md b/content/recipes/dishes-side/stewed-pears.md
new file mode 100644
index 0000000..9312feb
--- /dev/null
+++ b/content/recipes/dishes-side/stewed-pears.md
@@ -0,0 +1,50 @@
+---
+title: Stewed Pears
+date: 2022-11-20
+preptime: 10
+cooktime: 180
+serves: 2
+tags:
+- cold
+- fruit
+- sweet
+- vegetarian
+
+ingredients:
+- label: Stewing Pears
+ amount: 400
+ unit: grams
+- label: Cinnamon
+ amount: 4
+ unit: grams
+- label: Light Caster Sugar
+ amount: 16
+ unit: grams
+- label: Strawberry Lemonade Syrup
+ amount: 20
+ unit: grams
+- label: Water
+
+stages:
+- label: Preparation
+ steps:
+ - Peel the pears, and remove the cores.
+ - Cut the pears into quarters.
+- label: Stewing
+ steps:
+ - Put the pears in a pot.
+ - Fill the pot with water until it covers all the pears.
+ - Add the cinnamon.
+ - Add the sugar.
+ - Add the syrup.
+ - Stir until everything is combined.
+ - Let the pears stew until they are soft and have changed their color to a
+ bright pink.
+---
+
+A sweet dish, commonly served with gamey meat or stewed meat.
+
+<!--more-->
+
+This recipe is my attempt to get as close as possible to the little treat my
+grandmother originally made.
diff --git a/content/recipes/snacks/cheesecake-basque-burned.md b/content/recipes/snacks/cheesecake-basque-burned.md
index 0da9318..e59b9b2 100644
--- a/content/recipes/snacks/cheesecake-basque-burned.md
+++ b/content/recipes/snacks/cheesecake-basque-burned.md
@@ -16,13 +16,16 @@ ingredients:
amount: 500
unit: grams
- label: Flour
- amount: 40
+ amount: 50
unit: grams
- label: Granulated sugar
amount: 300
unit: grams
-- label: Heavy cream
- amount: 550
+- label: Heavy cream (35% fat)
+ amount: 500
+ unit: grams
+- label: Creme Fraiche
+ amount: 125
unit: grams
- label: Salt
amount: 1
diff --git a/content/services/_index.md b/content/services/_index.md
index dd98e8b..f303c7a 100644
--- a/content/services/_index.md
+++ b/content/services/_index.md
@@ -2,7 +2,8 @@
title: Services
---
-These are all the services I run for public use.
+These are all the services I run for public use. I give no guarantees about
+the stability of any of these services, nor about their longevity.
<ul>
{{ range .Pages }}
diff --git a/content/services/fiche.md b/content/services/fiche.md
index d097014..62e0fe8 100644
--- a/content/services/fiche.md
+++ b/content/services/fiche.md
@@ -4,19 +4,9 @@ location: https://p.tyil.nl
upstream: https://github.com/solusipse/fiche
---
-Fiche is used for hosting pastes, which can then be accessed over HTTP at
-`p.tyil.nl`. The easiest way to create a new paste is with `nc`.
+Fiche is a service to host pastes, which can be sent to it through various
+command line utilities. The easiest way to create a new paste is with `nc`.
-To paste the output of a given command:
-
-```
+```sh
$command | nc tyil.nl 9999
```
-
-Or, to upload the contents of a file:
-
-```
-nc tyil.nl 9999 < file
-```
-
-The buffer size is set to 32k, so that is the maximum size you can paste to it.
diff --git a/content/services/invidious.md b/content/services/invidious.md
new file mode 100644
index 0000000..211879f
--- /dev/null
+++ b/content/services/invidious.md
@@ -0,0 +1,8 @@
+---
+title: Invidious
+location: https://youtube.alt.tyil.nl
+upstream: https://github.com/iv-org/invidious
+---
+
+Invidious is an alternative front-end to YouTube. It greatly diminishes the
+amount of JavaScript required to watch content.
diff --git a/content/services/nitter.md b/content/services/nitter.md
index 483fd7e..5bb111a 100644
--- a/content/services/nitter.md
+++ b/content/services/nitter.md
@@ -3,3 +3,6 @@ title: Nitter
location: https://twitter.alt.tyil.nl
upstream: https://github.com/zedeus/nitter
---
+
+Nitter is an alternative front-end to Twitter, which uses no JavaScript at all
+to render the posts and comments. It also supports RSS feeds for user profiles.
diff --git a/content/services/omgur.md b/content/services/omgur.md
index f3b54e1..68c73ee 100644
--- a/content/services/omgur.md
+++ b/content/services/omgur.md
@@ -4,5 +4,6 @@ location: https://imgur.alt.tyil.nl
upstream: https://github.com/geraldwuhoo/omgur
---
-This service does not implement a "front page", but actual links to images or
-albums should work fine.
+Omgur is a JavaScript-free alternative front-end to Imgur. This project does
+not include a "front page"; only pages which show actual uploaded content are
+implemented.
diff --git a/content/services/searx.md b/content/services/searx.md
deleted file mode 100644
index 1460f19..0000000
--- a/content/services/searx.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Searx
-location: https://searx.tyil.nl
-upstream: https://github.com/searx/searx
----
-
-Searx is a free internet metasearch engine which aggregates results from more
-than 70 search services. Users are neither tracked nor profiled.
diff --git a/content/services/searxng.md b/content/services/searxng.md
new file mode 100644
index 0000000..abdcaa2
--- /dev/null
+++ b/content/services/searxng.md
@@ -0,0 +1,9 @@
+---
+title: SearXNG
+location: https://searxng.tyil.nl
+upstream: https://docs.searxng.org/
+---
+
+SearXNG is a free internet metasearch engine which aggregates results from more
+than 70 search services. Users are neither tracked nor profiled. It is a fork of
+Searx.
diff --git a/content/services/teddit.md b/content/services/teddit.md
index 1bf4a7e..a8eff08 100644
--- a/content/services/teddit.md
+++ b/content/services/teddit.md
@@ -3,3 +3,6 @@ title: Teddit
location: https://reddit.alt.tyil.nl
upstream: https://github.com/teddit-net/teddit
---
+
+Teddit is an alternative front-end to Reddit, which operates without the need
+for any JavaScript.
diff --git a/layouts/_default/baseof.html b/layouts/_default/baseof.html
index 98e77f7..f69ca6a 100644
--- a/layouts/_default/baseof.html
+++ b/layouts/_default/baseof.html
@@ -5,7 +5,13 @@
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
+ <link rel="canonical" class="u-url" href="{{ .Permalink }}">
<link rel="stylesheet" type="text/css" href="{{ $cssMain.Permalink }}">
+ <link rel="me" href="https://fedi.tyil.nl/@tyil">
+ <link rel="me" href="https://git.tyil.nl">
+ <link rel="me" href="https://sr.ht/~tyil">
+ <link rel="me" href="https://www.tyil.nl" class="h-card">
+ <link rel="me" href="mailto:p.spek@tyil.nl">
{{- range .AlternativeOutputFormats }}
<link rel="{{ .Rel }}" type="{{ .MediaType.Type }}" title="{{ $.Site.Title }}" href="{{ .Permalink }}">
{{- end }}
@@ -17,7 +23,7 @@
{{- block "body" . }}
<header id="site-header">
<div class="container">
- <nav>
+ <nav class="main">
<a href="/" class="brand-name">{{ .Site.Title }}</a>
{{- range sort .Site.Sections "Title" }}
<a href="{{ .Permalink }}">{{ .Title }}</a>
diff --git a/layouts/posts/list.html b/layouts/posts/list.html
index 40a0ba5..b5ec8ca 100644
--- a/layouts/posts/list.html
+++ b/layouts/posts/list.html
@@ -6,11 +6,13 @@
<ul>
{{- range .Pages }}
<li>
- <a href="{{ .Permalink }}">{{ .Title }}</a>
+ <a class="u-url" href="{{ .Permalink }}"><span class="p-name">{{ .Title }}</span></a>
<small>
- {{ .Date | dateFormat "2006-01-02" }}
+ <time class="dt-published" datetime="{{ .Date | dateFormat "2006-01-02" }}">
+ {{ .Date | dateFormat "2006-01-02" }}
+ </time>
{{- range .Params.tags }}
- <a href="/tags/{{ . | lower }}">#{{ . }}</a>
+ <a class="p-category tag" href="/tags/{{ . | lower }}">{{ . }}</a>
{{- end }}
</small>
</li>
diff --git a/layouts/posts/list.xml b/layouts/posts/list.xml
index 17e054b..deb2448 100644
--- a/layouts/posts/list.xml
+++ b/layouts/posts/list.xml
@@ -30,7 +30,7 @@
<pubDate>{{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }}</pubDate>
{{ with .Site.Author.email }}<author>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</author>{{end}}
<guid>{{ .Permalink }}</guid>
- <description>{{ .Summary | html }}</description>
+ <description>{{ .Content | html }}</description>
</item>
{{ end }}
</channel>
diff --git a/layouts/posts/single.html b/layouts/posts/single.html
index cab9527..eca87b9 100644
--- a/layouts/posts/single.html
+++ b/layouts/posts/single.html
@@ -1,10 +1,13 @@
{{ define "main" }}
-<article>
+<article class="h-entry">
<header>
- <h1>{{ .Title }}</h1>
- {{- range .Params.tags }}
- <a href="/tags/{{ . | lower }}">#{{ . }}</a>
- {{- end }}
+ <h1 class="p-name">{{ .Title }}</h1>
+ <p>
+ {{- range .Params.tags }}
+ <a class="p-category tag" href="/tags/{{ . | lower }}">{{ . }}</a>
+ {{- end }}
+ &mdash; Published on <time class="dt-published" datetime="{{ .Date | dateFormat "2006-01-02" }}">{{ .Date | dateFormat "2006-01-02" }}</time>.
+ </p>
{{- if .Draft }}
<section class="admonition">
<div class="admonition-title">
@@ -19,7 +22,7 @@
</section>
{{- end }}
</header>
- <main>
+ <main class="e-content">
{{ .Content }}
</main>
<footer>
diff --git a/layouts/project-release/list.html b/layouts/project-release/list.html
new file mode 100644
index 0000000..b52dd0e
--- /dev/null
+++ b/layouts/project-release/list.html
@@ -0,0 +1,14 @@
+{{- define "main" }}
+<h1>{{ .Parent.Title }} Releases</h1>
+{{ .Content }}
+<ul>
+ {{- range .Pages }}
+ <li>
+ <a href="{{ .Permalink }}">{{ .Title }}</a>
+ ({{ .Date | dateFormat "2006-01-02" }})
+ </li>
+ {{- end }}
+</ul>
+<p>Follow the <a href="{{ .Permalink }}index.xml">RSS feed</a> to stay up to date with
+the latest {{ .Parent.Title }} releases.</p>
+{{- end }}
diff --git a/layouts/project-release/list.xml b/layouts/project-release/list.xml
new file mode 100644
index 0000000..74cdda2
--- /dev/null
+++ b/layouts/project-release/list.xml
@@ -0,0 +1,27 @@
+{{- printf "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>" | safeHTML }}
+<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
+ <channel>
+ <title>{{ .Title }}</title>
+ <link>{{ .Permalink }}</link>
+ <description>{{ .Title }} Releases</description>
+ <generator>Hugo -- gohugo.io</generator>{{ with .Site.LanguageCode }}
+ <language>{{.}}</language>{{end}}{{ with .Site.Author.email }}
+ <managingEditor>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</managingEditor>{{end}}{{ with .Site.Author.email }}
+ <webMaster>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</webMaster>{{end}}{{ with .Site.Copyright }}
+ <copyright>{{.}}</copyright>{{end}}{{ if not .Date.IsZero }}
+ <lastBuildDate>{{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }}</lastBuildDate>{{ end }}
+ {{- with .OutputFormats.Get "RSS" -}}
+ {{ printf "<atom:link href=%q rel=\"self\" type=%q />" .Permalink .MediaType | safeHTML }}
+ {{- end -}}
+ {{ range .Pages }}
+ <item>
+ <title>{{ .Title }}</title>
+ <link>{{ .Permalink }}</link>
+ <pubDate>{{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }}</pubDate>
+ {{ with .Site.Author.email }}<author>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</author>{{end}}
+ <guid>{{ .Permalink }}</guid>
+ <description>{{ .Content | html }}</description>
+ </item>
+ {{ end }}
+ </channel>
+</rss>
diff --git a/layouts/project-release/single.html b/layouts/project-release/single.html
new file mode 100644
index 0000000..e422699
--- /dev/null
+++ b/layouts/project-release/single.html
@@ -0,0 +1,20 @@
+{{ define "main" }}
+<article>
+ <header>
+ <h1>{{ .Title }}</h1>
+ </header>
+ <main>
+ {{ .Content }}
+ </main>
+ <footer>
+ {{- with .Params.packages }}
+ Pre-built packages:
+ <ul>
+ {{- range $key, $value := . }}
+ <li><a href="{{ $value }}">{{ $key }}</a></li>
+ {{- end }}
+ </ul>
+ {{- end }}
+ </footer>
+</article>
+{{ end }}
diff --git a/layouts/projects/list.html b/layouts/projects/list.html
new file mode 100644
index 0000000..de50a9c
--- /dev/null
+++ b/layouts/projects/list.html
@@ -0,0 +1,21 @@
+{{- define "main" }}
+<h1>{{ .Title }}</h1>
+{{ .Content }}
+{{- range .Sections }}
+ <section class="project">
+ <h2>
+ {{ .Title }}
+ <small>
+ {{- range .Params.languages }}
+ {{ . }}
+ {{- end }}
+ </small>
+ </h2>
+ {{ .Content }}
+ <nav class="project">
+ <a href="{{ .Permalink }}releases">Releases</a>
+ <a href="{{ .Params.repository }}">Sources</a>
+ </nav>
+ </section>
+{{- end }}
+{{- end }}
diff --git a/layouts/recipes/list.html b/layouts/recipes/list.html
index 9c51074..b758a2b 100644
--- a/layouts/recipes/list.html
+++ b/layouts/recipes/list.html
@@ -1,17 +1,24 @@
{{- define "main" }}
<h1>{{ .Title }}</h1>
{{ .Content }}
-<ul>
- {{- range sort .Paginator.Pages "Title" }}
- <li>
- <a href="{{ .Permalink }}">{{ .Title }}</a>
- <small>
- {{ .Summary }}
- {{- range .Params.tags }}
- <a href="/tags/{{ . | lower }}">#{{ . }}</a>
- {{- end }}
- </small>
- </li>
- {{- end }}
-</ul>
+{{- range sort .Pages "Title" }}
+<section class="recipe">
+ <div class="preview">
+ {{- with .Params.preview }}
+ <img src="{{ .src }}" />
+ {{- end }}
+ </div>
+ <div class="description">
+ <h2>
+ <a class="p-name" href="{{ .Permalink }}">{{ .Title }}</a>
+ </h2>
+ <ul class="taglist">
+ {{- range sort .Params.tags }}
+ <li><a class="p-category tag" href="/tags/{{ . | lower }}">{{ . }}</a></li>
+ {{- end }}
+ </ul>
+ {{ .Content }}
+ </div>
+</section>
+{{- end }}
{{- end }}
diff --git a/layouts/recipes/single.html b/layouts/recipes/single.html
index 7919970..353abd2 100644
--- a/layouts/recipes/single.html
+++ b/layouts/recipes/single.html
@@ -1,14 +1,12 @@
-{{ define "head" }}
- <script type="text/javascript" src="/js/cookbook.js"></script>
-{{- end }}
-
{{ define "main" }}
<article>
<header>
- <h1>{{ .Title }}</h1>
- {{- range .Params.tags }}
- <a href="/tags/{{ . | lower }}">#{{ . }}</a>
- {{- end }}
+ <h1 class="p-name">{{ .Title }}</h1>
+ <ul class="taglist">
+ {{- range sort .Params.tags }}
+ <li><a class="p-category tag" href="/tags/{{ . | lower }}">{{ . }}</a></li>
+ {{- end }}
+ </ul>
{{- if .Draft }}
<section class="admonition">
<div class="admonition-title">
@@ -26,7 +24,7 @@
</section>
{{- end }}
</header>
- <main>
+ <main class="e-content">
{{ .Content }}
<table>
<tbody>
@@ -81,6 +79,9 @@
<h2>Instructions</h2>
{{- range $i, $stage := .Params.stages }}
<h3>{{ $stage.label }}</h3>
+ {{- if $stage.notes }}
+ <p>{{ .notes }}</p>
+ {{- end }}
<ol>
{{- range $j, $step := $stage.steps }}
<li>
diff --git a/layouts/services/list.html b/layouts/services/list.html
index 0f06f87..8e78be0 100644
--- a/layouts/services/list.html
+++ b/layouts/services/list.html
@@ -1,12 +1,13 @@
{{ define "main" }}
<h1>{{ .Title }}</h1>
{{ .Content }}
-<ul>
- {{ range .Paginator.Pages }}
- <li>
- <a href="{{ .Permalink }}">{{ .Title }}</a>
- <small>{{ .Params.location }}</small>
- </li>
- {{ end }}
-</ul>
+{{ range .Pages }}
+<section class="service">
+ <h2>
+ {{ .Title }}
+ <small><a href="{{ .Params.location }}">{{ .Params.location }}</a></small>
+ </h2>
+ {{ .Content }}
+</section>
+{{ end }}
{{ end }}