master
Jeff Clement 2 years ago
commit cfdc0b0b00
No known key found for this signature in database
GPG Key ID: 20D789D0E107ED2D
  1. 19
      .build.yml
  2. 4
      .gitignore
  3. 52
      .gitlab-ci.yml
  4. 0
      .gitmodules
  5. 87
      Gruntfile.js
  6. 21
      LICENSE
  7. 3
      README.md
  8. 139
      config.toml
  9. 0
      content/_index.md
  10. 64
      content/about/index.md
  11. 203
      content/post/2014/openbsd-yubikey-pin/index.md
  12. 93
      content/post/2014/openbsd-yubikey/index.md
  13. BIN
      content/post/2015/gpg-smartcard/card-vs-neo.jpg
  14. BIN
      content/post/2015/gpg-smartcard/gpg.tgz
  15. 1007
      content/post/2015/gpg-smartcard/index.md
  16. BIN
      content/post/2015/gpg-smartcard/osx/mail.png
  17. BIN
      content/post/2015/gpg-smartcard/windows/gpg.png
  18. BIN
      content/post/2015/gpg-smartcard/windows/putty.png
  19. BIN
      content/post/2015/gpg-smartcard/windows/windows_configuration.png
  20. BIN
      content/post/2015/gpg-smartcard/yubikey-mode.png
  21. BIN
      content/post/2016/kub-kar-timer/construction1.jpg
  22. 59
      content/post/2016/kub-kar-timer/index.md
  23. BIN
      content/post/2016/kub-kar-timer/inplace.jpg
  24. BIN
      content/post/2016/kub-kar-timer/kar.jpg
  25. BIN
      content/post/2016/kub-kar-timer/overhead.jpg
  26. BIN
      content/post/2016/kub-kar-timer/schematic.png
  27. BIN
      content/post/2016/kub-kar-timer/trigger.jpg
  28. 11
      content/post/2017/qubes-os-presentation/index.md
  29. 151
      content/post/2018/docker-ghost/index.md
  30. BIN
      content/post/2018/docker-ghost/ssl.png
  31. 1172
      content/post/2018/gpg-yubikey5/index.md
  32. BIN
      content/post/2018/gpg-yubikey5/yubiconfig.png
  33. 65
      content/post/2018/nginx-semiprivate/index.md
  34. 545
      content/post/2019/blog-hugo-gitlab/index.md
  35. BIN
      content/post/2019/blog-hugo-gitlab/pipelines.png
  36. BIN
      content/post/2019/blog-hugo-gitlab/search.gif
  37. BIN
      content/post/2019/blog-hugo-gitlab/variables.png
  38. 122
      content/post/2019/ubuntu-18-04-encrypted-disks-with-usb-boot/index.md
  39. 136
      content/post/2019/wireguard-access-server/index.md
  40. BIN
      content/post/2019/wireguard-access-server/phone.png
  41. 165
      content/post/2019/yubikey-setup/index.md
  42. BIN
      content/post/2019/yubikey-setup/yubimgr-switch-slots.png
  43. 222
      content/post/2020/nginx_golang_react_sockets/index.md
  44. 75
      content/post/2020/tor-relay/index.md
  45. BIN
      content/post/2020/tor-relay/nyx.png
  46. 18
      content/post/templates/2015-01-04-first-post.md
  47. 49
      content/post/templates/index.md
  48. 1
      static/.well-known/CloudIdVerificationCode.txt
  49. 56
      static/.well-known/keybase.txt
  50. 881
      static/css/main.css
  51. 59
      static/css/syntax.css
  52. BIN
      static/gallery/me/uni1.jpg
  53. BIN
      static/gallery/me/uni2.jpg
  54. 1
      static/googleeb2aa5f1f1268679.html
  55. 110
      static/gpg/aucerna.asc
  56. 171
      static/gpg/jclement-gpg.asc
  57. 79
      static/gpg/transition_20150323.txt
  58. 82
      static/gpg/transition_20181009.txt
  59. 83
      static/gpg/transition_20191004.txt
  60. BIN
      static/img/avatar-icon.png
  61. BIN
      static/img/favicon.ico
  62. 56
      static/keybase.txt
  63. 3
      static/robots.txt
  64. 17
      themes/jeff/.gitattributes
  65. 50
      themes/jeff/.gitignore
  66. 22
      themes/jeff/LICENSE
  67. 157
      themes/jeff/README.md
  68. 9
      themes/jeff/archetypes/default.md
  69. 137
      themes/jeff/data/beautifulhugo/social.toml
  70. 109
      themes/jeff/exampleSite/config.toml
  71. 2
      themes/jeff/exampleSite/content/_index.md
  72. 16
      themes/jeff/exampleSite/content/page/about.md
  73. 6
      themes/jeff/exampleSite/content/post/2015-01-04-first-post.md
  74. 6
      themes/jeff/exampleSite/content/post/2015-01-15-pirates.md
  75. 11
      themes/jeff/exampleSite/content/post/2015-01-19-soccer.md
  76. 6
      themes/jeff/exampleSite/content/post/2015-01-27-dear-diary.md
  77. 41
      themes/jeff/exampleSite/content/post/2015-02-13-hamlet-monologue.md
  78. 35
      themes/jeff/exampleSite/content/post/2015-02-20-test-markdown.md
  79. 14
      themes/jeff/exampleSite/content/post/2015-02-26-flake-it-till-you-make-it.md
  80. 42
      themes/jeff/exampleSite/content/post/2016-03-08-code-sample.md
  81. 49
      themes/jeff/exampleSite/content/post/2017-03-05-math-sample.md
  82. 40
      themes/jeff/exampleSite/content/post/2017-03-07-bigimg-sample.md
  83. 37
      themes/jeff/exampleSite/content/post/2017-03-20-photoswipe-gallery-sample.md
  84. 7
      themes/jeff/exampleSite/layouts/partials/footer_custom.html
  85. 18
      themes/jeff/exampleSite/layouts/partials/head_custom.html
  86. 0
      themes/jeff/exampleSite/static/.gitkeep
  87. 74
      themes/jeff/i18n/br.yaml
  88. 74
      themes/jeff/i18n/de.yaml
  89. 74
      themes/jeff/i18n/en.yaml
  90. 74
      themes/jeff/i18n/eo.yaml
  91. 74
      themes/jeff/i18n/es.yaml
  92. 74
      themes/jeff/i18n/fr.yaml
  93. 74
      themes/jeff/i18n/it.yaml
  94. 74
      themes/jeff/i18n/ja.yaml
  95. 74
      themes/jeff/i18n/nb.yaml
  96. 74
      themes/jeff/i18n/nl.yaml
  97. 74
      themes/jeff/i18n/pl.yaml
  98. 74
      themes/jeff/i18n/ru.yaml
  99. 74
      themes/jeff/i18n/zh-CN.yaml
  100. 74
      themes/jeff/i18n/zh-TW.yaml
  101. Some files were not shown because too many files have changed in this diff Show More

@ -0,0 +1,19 @@
# builds.sr.ht manifest: build the Hugo site and publish it to
# sourcehut pages (pages.sr.ht).
image: alpine/latest
oauth: pages.sr.ht/PAGES:RW
secrets:
  - 7e266198-e44f-4acc-a6e5-9b845f3180c8
sources:
  - git@git.sr.ht:~onewheelgeek/straybits.org
packages:
  - hugo
  - nodejs
environment:
  site: straybits.org
tasks:
  # Build the site and tar up the generated public/ directory.
  - package: |
      cd $site
      hugo
      tar -C public -cvz . > ../site.tar.gz
  # Push the tarball to pages.sr.ht (acurl wraps curl with the oauth token).
  - upload: |
      acurl -f https://pages.sr.ht/publish/$site \
        -Fcontent=@site.tar.gz

4
.gitignore vendored

@ -0,0 +1,4 @@
public/
node_modules/
package-lock.json
.DS_Store

@ -0,0 +1,52 @@
# GitLab CI pipeline: build the Hugo site, generate the Lunr search
# index, then rsync the result to the production server.
stages:
  - build
  - index
  - deploy

# Compile the site with Hugo; public/ is passed on as an artifact.
build:
  stage: build
  image: registry.gitlab.com/pages/hugo:latest
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
  script:
    - hugo version
    - hugo
  artifacts:
    paths:
      - public
  only:
    - master

# Run the "lunr-index" Grunt task (see Gruntfile.js) to write
# public/lunr.json for client-side search.
index:
  stage: index
  image: node
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
  script:
    - npm install -g grunt
    - npm install grunt string yamljs
    - grunt lunr-index
  artifacts:
    paths:
      - public
  only:
    - master

# Rsync public/ to the web host over SSH.  SSH_PRIVATE_KEY and
# SSH_KNOWN_HOSTS are CI/CD variables configured in the project settings.
deploy:
  stage: deploy
  image: alpine:latest
  before_script:
    - apk update && apk add openssh-client bash rsync
    - eval $(ssh-agent -s)
    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
    - mkdir -p ~/.ssh
    - chmod 700 ~/.ssh
    - echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts
    - chmod 644 ~/.ssh/known_hosts
  environment:
    name: production
    url: zeos.ca
  script:
    - rsync -hrvz --delete --exclude=_ -e "ssh" --progress public/ deploy_zeos@wilbur.zeos.ca:/var/www/zeos.ca
  only:
    - master

@ -0,0 +1,87 @@
var yaml = require("yamljs");
var S = require("string");
var CONTENT_PATH_PREFIX = "content";
module.exports = function(grunt) {
grunt.registerTask("lunr-index", function() {
grunt.log.writeln("Build pages index");
var indexPages = function() {
var pagesIndex = [];
grunt.file.recurse(CONTENT_PATH_PREFIX, function(abspath, rootdir, subdir, filename) {
grunt.verbose.writeln("Parse file:",abspath);
var processedFile = processFile(abspath, filename);
if (processedFile) {
pagesIndex.push(processedFile);
}
});
return pagesIndex;
};
var processFile = function(abspath, filename) {
var pageIndex;
if (S(filename).endsWith(".html")) {
pageIndex = processHTMLFile(abspath, filename);
} else if (S(filename).endsWith(".md")) {
pageIndex = processMDFile(abspath, filename);
}
return pageIndex;
};
var processHTMLFile = function(abspath, filename) {
var content = grunt.file.read(abspath);
var pageName = S(filename).chompRight(".html").s;
var href = S(abspath)
.chompLeft(CONTENT_PATH_PREFIX).s;
return {
title: pageName,
href: href,
content: S(content).trim().stripTags().stripPunctuation().s
};
};
var processMDFile = function(abspath, filename) {
var content = grunt.file.read(abspath);
var pageIndex;
// First separate the Front Matter from the content and parse it
content = content.split("---");
var frontMatter;
try {
frontMatter = yaml.parse(content[1].trim());
} catch (e) {
grunt.log.writeln(filename + " - " + e.message);
return;
}
var href = S(abspath).chompLeft(CONTENT_PATH_PREFIX).chompRight(".md").s;
// href for index.md files stops at the folder name
if (filename === "index.md") {
href = S(abspath).chompLeft(CONTENT_PATH_PREFIX).chompRight(filename).s;
}
// Skip drafts
if (frontMatter.draft) {
return;
}
// Build Lunr index for this page
pageIndex = {
title: frontMatter.title,
tags: frontMatter.tags,
href: href,
content: S(content[2]).trim().stripTags().stripPunctuation().s
};
return pageIndex;
};
grunt.file.write("public/lunr.json", JSON.stringify(indexPages()));
grunt.log.ok("Index built");
});
};

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Spencer Lyon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -0,0 +1,3 @@
![Build Status](https://gitlab.com/pages/hugo/badges/master/build.svg)
# zeos.ca

@ -0,0 +1,139 @@
baseurl = "https://straybits.org"
contentdir = "content"
layoutdir = "layouts"
publishdir = "public"
title = "StrayBits"
canonifyurls = true
DefaultContentLanguage = "en"
theme = "jeff"
metaDataFormat = "yaml"
disqusShortname = "zeos-ca"
#googleAnalytics = "XXX"
pygmentsUseClasses = true
pygmentsCodeFences = true
pygmentsCodefencesGuessSyntax = false
[BlackFriday]
smartypants = false
[markup]
[markup.goldmark]
[markup.goldmark.renderer]
unsafe = true
[markup.tableOfContents]
endLevel = 3
startLevel = 1
[Params]
subtitle = "Code, Wood, Unicycles & HAM"
readingTime = true
hideAuthor = true
logo = "img/avatar-icon.png"
favicon = "img/favicon.ico"
dateFormat = "January 2, 2006"
commit = false
selfHosted = true
rss = true
comments = true
#gcse = "002888195400749182309:cakccbp7nyf"
[[Params.bigimg]]
src = "img/triangle.jpg"
#desc = "Triangle"
#[[Params.bigimg]]
# src = "img/sphere.jpg"
# desc = "Sphere"
#[[Params.bigimg]]
# src = "img/hexagon.jpg"
# desc = "Hexagon"
[Author]
name = "Jeff Clement"
#email = "jeff at zeos dot ca"
#facebook = "username"
#googleplus = "+username" # or xxxxxxxxxxxxxxxxxxxxx
gitlab = "jeff"
github = "jclement"
twitter = "OneWheelGeek"
reddit = "OneWheelGeek"
#linkedin = "username"
#xing = "username"
#stackoverflow = "users/XXXXXXX/username"
#snapchat = "username"
#instagram = "username"
#youtube = "user/username" # or channel/channelname
#soundcloud = "username"
#spotify = "username"
#bandcamp = "username"
#itchio = "username"
mastodon = "onewheelgeek@mastodon.social"
#keybase = "jsc"
[[menu.main]]
name = "Blog"
url = ""
weight = 1
# [[menu.main]]
# identifier = "samples"
# name = "Samples"
# weight = 2
# [[menu.main]]
# parent = "samples"
# name = "Big Image Sample"
# url = "post/2017-03-07-bigimg-sample"
# weight = 1
# [[menu.main]]
# parent = "samples"
# name = "Math Sample"
# url = "post/2017-03-05-math-sample"
# weight = 2
# [[menu.main]]
# parent = "samples"
# name = "Code Sample"
# url = "post/2016-03-08-code-sample"
# weight = 3
#[[menu.main]]
# identifier = "projects"
# name = "Projects"
# weight = 2
[[menu.main]]
identifier = "projects"
name = "Projects"
weight = 4
[[menu.main]]
parent = "projects"
name = "Werdz"
url = "https://werdz.ca"
weight = 1
[[menu.main]]
identifier = "code"
name = "Code"
weight = 3
[[menu.main]]
parent = "code"
name = "Github"
url = "https://github.com/jclement"
weight = 2
[[menu.main]]
name = "About"
url = "/about/"
weight = 80
[[menu.main]]
name = "Tags"
url = "tags"
weight = 90

@ -0,0 +1,64 @@
---
title: About me
comments: false
---
{{< gallery dir="/gallery/me" caption-effect="fade" />}}
My name is Jeff Clement.
- I ride bikes and unicycles
- I make things, usually out of wood
- I make software
- I love computer security and privacy
### Contact Information
<table width="100%">
<tbody>
<tr>
<th>E-Mail:</th>
<td><a href="#" class="mail-link" data="jeffrey.clement at gmail dot com"><span class="mail-addr"></span></a></td>
</tr>
<tr>
<th>GnuPG:</th>
<td><a href="https://keybase.io/jsc/key.asc">61A0 DD3C D0EE 9A8D 32C2 1ADF 9C4D 3814 37B2 E99B</a><br />
<a href="/gpg/transition_20191004.txt">transition statement 2019-10-04</a><br />
<a href="/gpg/transition_20181009.txt">transition statement 2018-10-09</a><br />
<a href="/gpg/transition_20150323.txt">transition statement 2015-03-23</a></td>
</tr>
<tr>
<th>LinkedIn:</th>
<td><a href="http://www.linkedin.com/profile/view?id=6394933">Jeff Clement</a></td>
</tr>
<tr>
<th>Github:</th>
<td><a href="https://github.com/jclement">jclement</a></td>
</tr>
<tr>
<th>Twitter:</th>
<td><a href="https://twitter.com/OneWheelGeek">@OneWheelGeek</a></td>
</tr>
<tr>
<th>Mastodon:</th>
<td><a href="https://mastodon.social/@OneWheelGeek">OneWheelGeek@mastodon.social</a></td>
</tr>
<tr>
<th>Reddit:</th>
<td><a href="https://www.reddit.com/user/onewheelgeek">OneWheelGeek</a></td>
</tr>
<tr>
<th>Keybase:</th>
<td><a href="https://keybase.io/jsc">jsc</a></td>
</tr>
</tbody>
</table>

@ -0,0 +1,203 @@
---
title: OpenBSD Yubikey Authentication with PIN
tags: [openbsd, yubikey]
date: 2014-05-05
comments: true
---
I think that using the Yubikey for authentication is worthwhile. OpenBSD's
current implementation of `login_yubikey.c`, however, relies entirely on
the one-time password. I think the system would be stronger combining
the Yubikey with an additional PIN so that a compromise of the physical
security of the token doesn't compromise the associated account.
My work is loosely based off of [Remi Locherer's suggested patch][patch]. Where it differs is that I'd like to add an optional additional
PIN to the authentication rather than use an existing credential, such
as the system password. My thinking is, if you are using the Yubikey
token already, the PIN probably can be a fairly low strength password.
The system password shouldn't be set to something simple. This allows
for relaxed rules for the Yubikey PIN without affecting the system
password policy as a whole.
I propose adding a new `/var/db/yubikey/$user.pin` file that contains an
encrypted additional PIN (password). If present, this PIN must precede
the Yubikey one-time password when authenticating.
The password in `$user.pin` is encrypted in a manner similar to those in
`/etc/master.passwd`. Obviously, in a multi-user system some tool would
need to be devised to maintain these PIN/passwords. For my purposes,
the following works
```
# encrypt > /var/db/yubi/$user.pin
password_goes_here
```
This has a couple nice side-effects:
1. By verifying hashed passwords I believe it is less susceptible to
timing attacks and doesn't need a fancy time invariant string compare function
2. Physical compromise of the Yubikey token does not immediately yield
access to the associated account
3. Compromise (read) of the contents of `/var/db/yubikey` does not
immediately allow an attacker to access those accounts
This change is non-breaking in that, if the `$user.pin` file is not
present, login_yubikey works as before.
```diff
Index: libexec/login_yubikey/login_yubikey.8
===================================================================
RCS file: /cvs/src/libexec/login_yubikey/login_yubikey.8,v
retrieving revision 1.8
diff -u -p -u -p -r1.8 login_yubikey.8
--- libexec/login_yubikey/login_yubikey.8 14 Aug 2013 08:39:31 -0000 1.8
+++ libexec/login_yubikey/login_yubikey.8 7 May 2014 13:13:50 -0000
@@ -85,8 +85,10 @@ will read the user's UID (12 hex digits)
.Em user.uid ,
the user's key (32 hex digits) from
.Em user.key ,
-and the user's last-use counter from
-.Em user.ctr
+the user's last-use counter from
+.Em user.ctr ,
+and the user's PIN (optional) from
+.Em user.pin
in the
.Em /var/db/yubikey
directory.
@@ -99,6 +101,14 @@ If
does not have a last-use counter, a value of zero is used and
any counter is accepted during the first login.
.Pp
+If
+.Ar user
+does have a PIN file, the PIN must be provided before the one-time password
+and the PIN will be verified (using
+.Xr crypt 8 ) against the contents of the PIN
+file. If the PIN file is not present, the user must provide only the one-time
+password.
+.Pp
The one-time password provided by the user is decrypted using the
user's key.
After the decryption, the checksum embedded in the one-time password
@@ -124,4 +134,5 @@ Directory containing user entries for Yu
.El
.Sh SEE ALSO
.Xr login 1 ,
-.Xr login.conf 5
+.Xr login.conf 5 ,
+.Xr crypt 8
Index: libexec/login_yubikey/login_yubikey.c
===================================================================
RCS file: /cvs/src/libexec/login_yubikey/login_yubikey.c,v
retrieving revision 1.8
diff -u -p -u -p -r1.8 login_yubikey.c
--- libexec/login_yubikey/login_yubikey.c 27 Nov 2013 21:25:25 -0000 1.8
+++ libexec/login_yubikey/login_yubikey.c 7 May 2014 13:13:50 -0000
@@ -44,6 +44,7 @@
#include <syslog.h>
#include <unistd.h>
#include <errno.h>
+#include <util.h>
#include "yubikey.h"
@@ -54,15 +55,18 @@
#define AUTH_OK 0
#define AUTH_FAILED -1
+#define YUBIKEY_LENGTH 44
+
static const char *path = "/var/db/yubikey";
static int clean_string(const char *);
static int yubikey_login(const char *, const char *);
+static int pin_login(const char *, const char *);
int
main(int argc, char *argv[])
{
- int ch, ret, mode = MODE_LOGIN;
+ int ch, ret, ret_pin, mode = MODE_LOGIN;
FILE *f = NULL;
char *username, *password = NULL;
char response[1024];
@@ -151,9 +155,33 @@ main(int argc, char *argv[])
}
}
- ret = yubikey_login(username, password);
+ int password_length = strlen(password)-YUBIKEY_LENGTH;
+
+ /* if the password length < 0 that means this isn't even long enough to contain a valid yubi token */
+ if (password_length < 0) {
+ syslog(LOG_INFO, "user %s: reject", username);
+ fprintf(f, "%s\n ", BI_REJECT);
+ closelog();
+ return (EXIT_SUCCESS);
+ }
+
+ char password_pin[password_length +1];
+ char password_yubi[YUBIKEY_LENGTH + 1];
+
+ /* first password_length bytes are PIN */
+ strlcpy(password_pin, password, password_length + 1);
+
+ /* remaining 44 bytes are yubikey token */
+ strlcpy(password_yubi, (char*)password + password_length, YUBIKEY_LENGTH + 1);
+
+ ret = yubikey_login(username, password_yubi);
+ ret_pin = pin_login(username, password_pin);
+
memset(password, 0, strlen(password));
- if (ret == AUTH_OK) {
+ memset(password_pin, 0, strlen(password_pin));
+ memset(password_yubi, 0, strlen(password_yubi));
+
+ if (ret == AUTH_OK && ret_pin == AUTH_OK) { /* successfull login calls both yubi/pin code and requires AUTH_OK from both */
syslog(LOG_INFO, "user %s: authorize", username);
fprintf(f, "%s\n", BI_AUTH);
} else {
@@ -174,6 +202,38 @@ clean_string(const char *s)
}
return (1);
}
+
+static int
+pin_login(const char *username, const char *pin)
+{
+ char fn[MAXPATHLEN];
+ FILE *f;
+ char encrypted_pin[101]; // pin is salted/hashed (crypt)
+
+ snprintf(fn, sizeof(fn), "%s/%s.pin", path, username);
+ if ((f = fopen(fn, "r")) == NULL) {
+ if (strlen(pin) > 0) {
+ syslog(LOG_ERR, "user %s: fopen: %s: %m", username, fn);
+ return (AUTH_FAILED);
+ } else {
+ /* if pin is empty and file is missing revert to original behaviour */
+ return (AUTH_OK);
+ }
+ }
+
+ if (fscanf(f, "%100s", encrypted_pin) != 1) {
+ syslog(LOG_ERR, "user %s: fscanf: %s: %m", username, fn);
+ fclose(f);
+ return (AUTH_FAILED);
+ }
+ fclose(f);
+
+ char* salted_pin = crypt(pin, encrypted_pin);
+ if (strcmp(salted_pin, encrypted_pin) != 0)
+ return (AUTH_FAILED);
+
+ return (AUTH_OK);
+};
static int
yubikey_login(const char *username, const char *password)
```
[patch]: http://comments.gmane.org/gmane.os.openbsd.tech/34693

@ -0,0 +1,93 @@
---
title: OpenBSD Yubikey Authentication
tags: [openbsd, yubikey]
date: 2014-05-05
comments: true
---
OpenBSD includes out-of-the-box support for login via. [YubiKey][yubikey]. Yay!
OpenBSD doesn't authenticate against a central server (such as the service offered by Yubico) to verify a YubiKey. This is good because I don't have to trust a 3rd party with my credentials. Unfortunately, this also means that OpenBSD is tracking the "last-use" token (not centralized) which means that without somehow synchronizing the "last-use" value I can only safely use a YubiKey token on a single machine. Using it on multiple machines would open me up to replay attacks where a YubiKey entered on one machine (where "last-use" is big), could be used on another machine (where "last-use" is smaller).
I can live with this but it's something to be aware of.
The OpenBSD YubiKey authentication ''replaces'' password authentication. Ideally, I would have to provide both a password and a YubiKey as credentials so that finding my YubiKey is not sufficient to compromise my system. Fortunately [a patch exists][patch] that allows me to use both.
<div class="alert alert-success">
<b>Update:</b> See my <a href="../openbsd-yubikey-pin/">login_yubikey.c patch</a> that adds support for an additional PIN when logging in with a Yubikey.
</div>
## Configuring the YubiKey ##
Configuring the YubiKey is a bit of a pain. Yubico offers some nice utilities and it's easiest to run these on Windows. My preferred approach is to setup a Windows VM with the Yubi tools, turn off networking, snapshot it, program my YubiKeys, record the private id / secret key, and then rollback the snapshot.
Roughly:
1. Start the YubiKey Personalization Tool
2. Insert your YubiKey
3. Click "Yubico OTP" in the header at the top
4. Click "Quick"
5. Choose a configuration slot to program (1 is often pre-programmed so you may want to be careful overwriting that)
6. Record the "Private Identity" and "Secret Key"
7. Click "Write Configuration" to push those keys onto your YubiKey
The "Advanced" mode lets you lock your YubiKey to prevent overwriting your credentials.
![Yubikey Setup](setup.png)
## Configuring OpenBSD to Login via. YubiKey ##
1. Grab the value from the "Private Identity" field and put it into `/var/db/yubikey/$user.uid` (removing all the spaces!)
2. Grab the value from the "Secret Key" field and put it into `/var/db/yubikey/$user.key` (removing all the spaces!)
3. Make sure permissions are correct (owned by root.auth, 0640)
<div class="alert alert-info">I got hung up on the lack of spaces in the identity/key values. The YubiKey personalization tool includes spaces between hex digits while the OpenBSD configuration files do not. </div>
Note: The echo "..." statements below work but shouldn't really be used. They will expose the secret key in the process list and your shell's history. Best to just open the files in a text editor and type/paste those values in.
```
# cd /var/db/yubikey
# echo "f2a4ac5bb965" > bob.uid
# echo "b70c2224b328523b43d46f4bdb5221a6" > bob.key
# chown root.auth bob.*
# chmod 640 bob.*
# ls -l
total 32
-rw-r----- 1 root auth 33 Apr 16 07:32 bob.key
-rw-r----- 1 root auth 13 Apr 16 07:32 bob.uid
-rw-rw---- 1 root auth 3 Apr 16 07:30 root.ctr
-rw-r----- 1 root auth 33 Apr 16 07:29 root.key
-rw-r----- 1 root auth 13 Apr 16 07:29 root.uid
```
Open `/etc/login.conf` and add "yubikey" to the auth-defaults.
<div class="alert alert-danger">This will REQUIRE YubiKey logins for <b>ALL</b> users of the system so make sure you have one setup for root before enabling this or you'll be fixing it with single-user mode.</div>
```
auth-defaults:auth=yubikey,passwd,skey:
```
## Configuring SSH to require YubiKey + Public Key ##
I choose to configure SSH to require login using a public key AND my YubiKey by adding the following two lines to `/etc/ssh/sshd_config`.
```
AuthenticationMethods publickey,password
PasswordAuthentication no
```
When you login you should see something like this:
```
$ ssh bob@localhost
OpenBSD 5.4 (GENERIC) #37: Tue Jul 30 15:24:05 MDT 2013
Enter passphrase for key '/home/bob/.ssh/id_ecdsa':
Authenticated with partial success.
bob@localhost's password: [PRESS YUBIKEY BUTTON HERE]
$
```
[article]: http://blog.cmacr.ae/2fa-with-the-yubikey-for-ssh-access/
[yubikey]: https://www.yubico.com/products/yubikey-hardware/yubikey/
[patch]: http://comments.gmane.org/gmane.os.openbsd.tech/34693

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 55 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 189 KiB

@ -0,0 +1,59 @@
---
title: Kub Kar Timer
date: 2016-02-14
template: article.jade
comments: true
toc: true
---
My boys are in Boy Scouts and the annual Kub Kar races are a fun part of the program. Our group has a couple older wooden tracks and I wanted to add a timer mechanism to them that would time and rank each car for each race.
I decided to build this based on the Arduino platform because I'm at least somewhat familiar with it.
<p class="note">Update 2016-02-21 - We had our first rally using this contraption and it worked flawlessly. The kids loved it.</p>
![In Place](inplace.jpg)
There are [companies that sell timers](http://www.besttrack.com/champ_timer.htm) but at $615 for a 4 track timer... Ouch!
## The Setup
![Schematic](schematic.png)
<p class="note">All the input PINs (D 2,3,4,5,6) should have their internal pull-up enabled.</p>
## The Trigger
The complicated part of this project is detecting cars as they pass through the finish gate. Typically this would be done optically, with the car breaking a beam and that stopping the clock. However, I finally started this project three days before the race. I didn't have any lasers or photo-sensors handy so I took an alternative and mechanical approach.
![Trigger](trigger.jpg)
I considered adding push buttons at the very end of the track but, given the speed some of the cars are going, I figured I'd either damage the buttons or the cars with that approach.
The premise for my final solution was to dangle a conductive "wire" (lamp pull chain) across the track and the car will push it into another conductor (surrounding copper tube) closing a circuit. Because the switch isn't in charge of stopping the car I figured this would hold up to the abuse better.
![Kub Kar on Track](kar.jpg)
I ended up drilling a 1/2" hole in the underside of the timing platform and insert a 1" section of 1/2" copper pipe. Directly through the center I dangled a chunk of steel lamp pull-chain. The copper tubes are all tied to ground and the chains are tied to the Arduino's digital IO pins (with internal pull-up resistors enabled).
<p class="note">With this design it's important that the chain in each lane is a few links too long and dangles on the lane. This prevents the chain from swinging back and forth for ages and allows you to get on with the next race much faster.</p>
For actually triggering the race I ran cables from the timer to the starting gate and then used some Allround as an improvised switch that was closed when the gate was up. As soon as the gate drops the timer starts.
## The Display
If I were good at this type of thing I think the right displays would have been 4 big 7-segment displays for the placing, and then 4 multi-digit displays for the times. Using big 7-segment displays would be nice because you can read them from far away.
I opted for a small 20x4 LCD I2C display because that's what I had handy.
## The Software
The source code for this entire project is up on [Bitbucket](https://bitbucket.org/jclement/kub-kar-timer).
## The Build
I built the track from 1/2" Baltic birch plywood.
The only real concerns are making sure it's wide enough to straddle the track and high-enough to allow verticals to pass underneath without incident.
The top is covered with a piece of 1/4" Lexan to allow everyone to see the interesting bits inside.

Binary file not shown.

After

Width:  |  Height:  |  Size: 189 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 176 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 121 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 150 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

@ -0,0 +1,11 @@
---
title: QubesOS Presentation
date: 2017-03-02
comments: true
tags: [qubesos, presentation]
toc: true
---
I'm a huge fan of QubesOS. Here are some slides from an internal company presentation trying to inflict QubesOS on my coworkers.
<!--more-->
<script async class="speakerdeck-embed" data-id="5efcb0cba6654f54b262e8bc10f0232a" data-ratio="1.77777777777778" src="//speakerdeck.com/assets/embed.js"></script>

@ -0,0 +1,151 @@
---
title: Deploying Ghost with Docker & NGINX
date: 2018-09-12
comments: true
tags: [server]
toc: false
---
It seemed like a good idea to try something new with this website. I settled on running the fancy blogging software Ghost because it looked pretty, has a wonderful editing experience (with markdown support), and (most importantly) I'd never used it before.
<!--more-->
I have a VPS through Digital Ocean hosting this site and a few other things. I wanted to use that VPS rather than maintaining yet another machine. There are a couple things about Ghost that make me uncomfortable installing it on the bare machine.
1. It requires a specific globally installed version of Node
2. It doesn't install using Ubuntu's packaging system but is, instead, installed using its own deployment script
3. There is no support for running multiple Ghost instances on a single machine
I wanted to:
* Limit the impact on the bare machine
* Have the ability to run multiple Ghost instances
* Run Ghost behind NGINX and have NGINX handle SSL termination (LetsEncrypt)
* Have some sort of safe mechanism for performing upgrades
* Isolate the Ghost process as much as possible so that if it turns evil its impact on the rest of the system is minimized
This seemed like an ideal project to deploy under Docker. This post is mostly documentation of that process so that future Jeff (when he has to fix something) remembers how this worked.
Fortunately, I'm not the first to want to deploy Ghost using Docker so there are a number of Docker images. I picked the official one here.
Getting it up and running is fairly easy:
```
docker run --name ghost-forgesi \
-p 127.0.0.1:13003:2368 \
-d ghost:alpine
```
That starts the Ghost docker image and exposes it on port 13003 on localhost.
Next step is to configure NGINX to forward requests for my website through to my Ghost instance.
I added a new site on `sites-available` and symlinked that into `sites-enabled` for my website "forgesi.net".
In this case I have forgesi.net:80 www.forgesi.net:80 and forgesi.net:443 all redirecting to www.forgesi.net:443 so there are a couple bonus server containers in my configuration.
```
# Redirect HTTP traffic to www.forgesi.net:443
server {
listen 80;
listen [::]:80;
server_name forgesi.net www.forgesi.net;
return 301 https://www.forgesi.net$request_uri;
}
# Redirect forgesi.net HTTPS traffic to www.forgesi.net:443
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name forgesi.net;
ssl on;
ssl_certificate /etc/letsencrypt/live/forgesi.net/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/forgesi.net/privkey.pem; # managed by Certbot
include /etc/nginx/ssl_params;
return 301 https://www.forgesi.net$request_uri;
}
# Main Server Configuration
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name www.forgesi.net;
root /var/www/forgesi.net;
index index.html;
access_log /var/log/nginx/access.log;
ssl on;
ssl_certificate /etc/letsencrypt/live/forgesi.net/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/forgesi.net/privkey.pem; # managed by Certbot
include /etc/nginx/ssl_params;
# If a physical file exists in DocRoot host that, otherwise pass through to Ghost
location / {
try_files $uri $uri @forgesighost;
}
location @forgesighost {
proxy_pass http://127.0.0.1:13003;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
}
}
```
The SSL certificates in the above configuration were created by LetsEncrypt. When I originally created this file I just used whatever self-signed cert I had kicking around and then let LetsEncrypt take it over.
```
$ sudo apt install python-certbot-nginx
$ sudo certbot --nginx -d forgesi.net -d www.forgesi.net
$ sudo service nginx restart
```
The `/etc/nginx/ssl_params` file mentioned in the configuration is just some parameters that help me get an A+ rating on SSL Labs:
![SSL Configuration](ssl.png)
```
ssl_session_timeout 5m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;
ssl_dhparam /etc/nginx/dhparam.pem;
add_header Strict-Transport-Security "max-age=31536000";
ssl_prefer_server_ciphers on;
```
After making the above configuration changes, starting the Ghost Docker image, and restarting nginx, I can now connect to my Ghost test server.
There are, however, some problems I need to address:
* The Ghost image doesn't know its server name (many links are pointing to localhost)
* The Ghost image doesn't know how to send email (for lost passwords)
* The Ghost process is running as user 1000 which overlaps with a local user account on the host system
* All content is stored within the Ghost image and lost when I restart/upgrade the image.
The following script deletes and recreates my Docker container and resolves the above issues.
* It uses Mailgun for mail delivery (Mailgun has a good free tier)
* It runs as user 1001 (A dedicated user for this)
* All data is persisted to /var/lib/ghost/forgesi on the server machine (making it easy to backup, and allowing it to persist across upgrades)
```sh
#!/bin/sh
docker rm -f ghost-forgesi
docker run --name ghost-forgesi \
-p 127.0.0.1:13003:2368 \
--user 1001 \
-e url=https://www.forgesi.net \
-e mail__transport=SMTP \
-e mail__from=noreply@mg.forgesi.net \
-e mail__options__service=Mailgun \
-e mail__options__auth__user=postmaster@mg.forgesi.net \
-e mail__options__auth__pass=ZZZZZZ \
-v /var/lib/ghost/forgesi:/var/lib/ghost/content \
--restart=always \
-d ghost:alpine
```
Now, when Ghost is upgraded, I can update my image with a `docker pull ghost:alpine` and then rebuild my container with the above script.

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

@ -0,0 +1,65 @@
---
title: NGINX Semi-private Site
date: 2018-12-20
comments: true
tags: [server]
toc: false
---
We used to run a development blog for work. We wanted:
1. To use NGINX to host this content. It was all static pages.
2. To limit access to people within our network, or to employees while outside the network (phones, laptops, etc.).
3. We didn't want to deal with user accounts, active directory, etc.
4. We wanted super low friction for users.
<!--more-->
The information wasn't super confidential but we didn't want it to be picked up by search engines and stuff like that.
What I settled on was the following:
1. Whitelist our public facing IPs. If you are hitting the server from one of our networks, you get in.
2. Add special handling of "?key=XXXX" in the query string. If there is a KEY, create a cookie called "key" and store whatever that value was in the cookie and then redirect to the site without that ?key= query string.
3. If the cookie exists and is set to a magic value, you get in.
4. Otherwise, 403!
This allows us to issue links to content on the blog in the form "http://blog/article?key=XXXXX" and they would always work regardless of where the user was connecting from. Once they did that once, subsequent visits to the blog would just work.
It's not super secure or brilliant but it was a handy solution and I'm documenting the configuration here mostly so that "Future Jeff" has it for reference :)
```
location / {
set $deny 1;
# If we see the magic "key" param in the query string, set
# the cookie and redirect to the site without the query string
if ($query_string ~ "(.*)key\=([A-Za-z0-9]+)(&?)(.*)") {
set $key $2;
add_header Set-Cookie "key=$key;Path=/;Max-Age=31536000";
rewrite ^(.*)$ $1? permanent;
}
# Clone this section for each whitelisted IP
if ($REMOTE_ADDR = "PUBLICIP") {
set $deny 0;
}
# Adjust the SECRETMAGICKEY here with some sort of magic key (A-Z, 0-9)
# This key is used to protect access to the entire site
if ($http_cookie ~* "SECRETMAGICKEY") {
set $deny 0;
}
# Allow access to .well-known folder so LetsEncrypt CertBot works.
if ($request_uri ~* "/.well-known/") {
set $deny 0;
}
if ($deny) {
return 403;
}
}
```

@ -0,0 +1,545 @@
---
title: Blog with Hugo, Gitlab CD, and Caddy
date: 2019-09-20
comments: true
tags: [server]
showToc: true
---
This post is an overview on how I setup this site (built using [Hugo](https://gohugo.io)) to be automatically deployed to my [Caddy](https://caddyserver.com/) server using [Gitlab's](https://gitlab.com) continuous deployment.
<!--more-->
I routinely swap between fancy blogging tools like Ghost and Wordpress as well as static site generation tools like Jekyll and Hugo far more often than I should.
I keep coming back to static site generation for several reasons:
- **Server footprint** - It's easy to host static files. They can be easily deployed via Amazon S3, Digital Ocean Spaces, or any random web server technology. Static sites are trivial to scale. Static sites have a tighter security footprint (no giant PHP process handling requests). Static sites are faster (page generation done when content is added, not when it's viewed).
- **Longevity** - I like that I can zip up the contents of my website and store it somewhere and be pretty sure I'll be able to read it in 15 years.
<strike>The primary downside of a static blog is that you are stuck using tools like Disqus for comments (if you want those), which means that some 3rd party is capturing information about my visitors. </strike> I've added a bonus section below that details adding client-side search to the website so we can avoid passing visitor information to Google.
Another major annoyance for me has been deploying the static sites to production facing servers. This post is all about automating that.
So...
The source code for my blog is stored in a private git repository on my personal Gitlab server running out of my basement.
I wanted to automatically publish updates (commits) to my blog to a separate web server hosted by Digital Ocean. This means that when I commit to my blog project on Gitlab, I want to use Gitlab's CD mechanism to automatically build my blog and then copy it across to my public facing webserver.
This post details most of the steps required to set up my Hugo based blog to be automatically published when I update it.
# Caddy Server Setup
Let's start with the setup of the web server itself.
Historically I've used NGINX or Apache for hosting my websites. This time around, I selected Caddy. My primary reason for choosing Caddy was that it's something new and different (for me). I was also drawn to its simplicity, its seamless out-of-the-box HTTPS using LetsEncrypt, as well as HTTP/2 support by default.
## Caddy Installation
I started with a brand new Ubuntu 18.04 droplet on Digital Ocean (my VPS provider of choice).
Installing Caddy is a bit different because it's not (currently) in the package repos for Ubuntu. Instead, it's packaged as a single binary.
The first step is to download and install the Caddy server to `/usr/local/bin`.
```
curl https://getcaddy.com | sudo bash -s personal
```
I'm running Caddy on the base machine (not in Docker), so the next steps are to set it up to run happily under systemd. I followed the instructions [here](https://github.com/caddyserver/caddy/tree/master/dist/init/linux-systemd) (summarized below).
```
# allow Caddy to bind to 80 & 443
sudo setcap 'cap_net_bind_service=+ep' /usr/local/bin/caddy
# setup www-data user and group
sudo groupadd -g 33 www-data
sudo useradd \
-g www-data --no-user-group \
--home-dir /var/www --no-create-home \
--shell /usr/sbin/nologin \
--system --uid 33 www-data
sudo mkdir /etc/caddy
sudo chown -R root:root /etc/caddy
sudo mkdir /etc/ssl/caddy
sudo chown -R root:www-data /etc/ssl/caddy
sudo chmod 0770 /etc/ssl/caddy
# Setup placeholder Caddyfile
sudo touch /etc/caddy/Caddyfile
sudo chown root:root /etc/caddy/Caddyfile
sudo chmod 644 /etc/caddy/Caddyfile
# Setup /var/www
sudo mkdir /var/www
sudo chown www-data:www-data /var/www
sudo chmod 555 /var/www
# Add service for Caddy to systemd
wget https://raw.githubusercontent.com/caddyserver/caddy/master/dist/init/linux-systemd/caddy.service
sudo cp caddy.service /etc/systemd/system/
sudo chown root:root /etc/systemd/system/caddy.service
sudo chmod 644 /etc/systemd/system/caddy.service
sudo systemctl daemon-reload
sudo systemctl start caddy.service
sudo systemctl enable caddy.service
```
## Caddy Configuration
I'm hosting content for my blog (zeos.ca), so I'm going to create a folder under `/var/www` for my domain (zeos.ca), and add some configuration to my `/etc/caddy/Caddyfile`.
```
mkdir /var/www/zeos.ca
```
Next, I need to add a virtual host for zeos.ca to my Caddyfile `/etc/caddy/Caddyfile`:
```
zeos.ca {
root /var/www/zeos.ca
gzip
errors {
404 404.html
}
}
```
I'd also like to redirect www.zeos.ca to zeos.ca, so I'll also add the following to `/etc/caddy/Caddyfile`.
```
www.zeos.ca {
redir https://zeos.ca{uri}
}
```
At this point, assuming DNS is correctly pointing at this machine, I can restart Caddyserver and it will automatically grab an SSL certificate for zeos.ca and www.zeos.ca from LetsEncrypt, and start serving the contents of `/var/www/zeos.ca` for requests to zeos.ca.
```
sudo systemctl restart caddy.service
```
## Generating Deployment Keys
The final steps required on the webserver machine are to set ourselves up for automated deployment by generating a new user which we'll use for deployment (`deploy_zeos`), and creating a new SSH keypair for our deployment process.
```
sudo adduser --disabled-password deploy_zeos
```
Create a SSH keypair for our deployment keys (make sure not to enter a password!):
```
ssh-keygen -t ed25519 -f ~/deploy_zeos
```
Add the public key we just generated to the list of authorized keys `~/.ssh/authorized_keys`.
```
sudo -u deploy_zeos mkdir ~/.ssh
cat ~/deploy_zeos.pub | sudo -u deploy_zeos tee ~/.ssh/authorized_keys
```
Finally, copy the private contents of the private key (`~/deploy_zeos`) file somewhere safe. We'll need those in a second when configuring Gitlab.
It'll look something like this:
```
-----BEGIN OPENSSH PRIVATE KEY-----
...
...
-----END OPENSSH PRIVATE KEY-----
```
{{% warning %}}
The private key `~/deploy_zeos` needs to be protected. Anyone with that key can login to the `deploy_zeos` user on the webserver. Once Gitlab is setup (below), this file should be deleted from the webserver.
{{% /warning %}}
# Setting up Gitlab CI/CD
Beyond the scope of this post is setting up Gitlab build agents. Once you've got that up and running, setting up a CI/CD process for a Hugo project is pretty straightforward.
## Setting up the SSH environment variables
The first step is to give Gitlab our private SSH key (generated above) so that our CD process can publish builds to our webserver.
We do this by adding two variables to the CI/CD configuration (`Settings > CI/CD > Variables`) for the blog project.
| Variable Name | Variable Contents | State |
|---|---|---|
| SSH_PRIVATE_KEY | Contents of `~/deploy_zeos` from previous step | Protected |
| SSH_KNOWN_HOSTS | SSH fingerprint of web server. Output of `ssh-keyscan wilbur.zeos.ca`. | Not Protected |
It should end up looking something like this:
![Variables](variables.png)
## CI/CD Configuration
The next step is to set up a CI/CD deployment script and add it to the root of the blog repository (`.gitlab-ci.yml`).
My script has two phases: build and deploy. In the build phase, we build our blog using a Docker container with the latest and greatest Hugo binaries and store the generated output. In the deploy phase, we configure SSH and then rsync the output generated from the previous step to the webserver.
```
stages:
- build
- deploy
build:
stage: build
image: registry.gitlab.com/pages/hugo:latest
variables:
GIT_SUBMODULE_STRATEGY: recursive
script:
- hugo version
- hugo
artifacts:
paths:
- public
only:
- master
deploy:
stage: deploy
image: alpine:latest
before_script:
- apk update && apk add openssh-client bash rsync
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
- mkdir -p ~/.ssh
- chmod 700 ~/.ssh
- echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts
- chmod 644 ~/.ssh/known_hosts
environment:
name: production
url: zeos.ca
script:
- rsync -hrvz --delete --exclude=_ -e "ssh" --progress public/ deploy_zeos@wilbur.zeos.ca:/var/www/zeos.ca
only:
- master
```
Once this is checked in, new commits to the blog repository will automatically trigger a build and publish to the `/var/www/zeos.ca` folder on the webserver using the Gitlab CI/CD functionality.
You can see build history / monitor the pipelines under the `CI/CD > Pipelines` menu in the repository. It'll show information about each build. If the build fails, clicking on the failed step should allow you to see the log and troubleshoot the whole thing.
![Pipelines](pipelines.png)
# Searching (bonus)
![Search Animation](search.gif)
Rather than complicating my deployment and adding some sort of server-side searching, or outsourcing search to something like Google, I've added client-side searching using [lunr](https://lunrjs.com/). Adding client-side searching to a Hugo site, and adding that to the build process is relatively easy.
The following is largely based on [this gist](https://gist.github.com/sebz/efddfc8fdcb6b480f567).
## Generating an Index
For development and testing, you'll need NodeJS installed, and the following packages
```
npm install -g grunt
# in your website root
npm install grunt yamljs string
```
The following `Gruntfile.js` handles the building of an index by processing all markdown and HTML files and creating JSON index in `public/lunr.json`.
```js
// Grunt task that builds a client-side search index for lunr.js.
// It walks every file under `content/`, extracts the searchable text
// from markdown and HTML files, and writes the result as a JSON array
// to public/lunr.json (one entry per page: title, tags, href, content).
var yaml = require("yamljs");
var S = require("string");
var CONTENT_PATH_PREFIX = "content";
module.exports = function(grunt) {
grunt.registerTask("lunr-index", function() {
grunt.log.writeln("Build pages index");
// Walk the content tree and collect an index entry for each page.
var indexPages = function() {
var pagesIndex = [];
grunt.file.recurse(CONTENT_PATH_PREFIX, function(abspath, rootdir, subdir, filename) {
grunt.verbose.writeln("Parse file:",abspath);
var processedFile = processFile(abspath, filename);
// processFile returns undefined for drafts, unparseable files,
// and unsupported extensions -- those are skipped.
if (processedFile) {
pagesIndex.push(processedFile);
}
});
return pagesIndex;
};
// Dispatch on file extension; returns an index entry or undefined.
var processFile = function(abspath, filename) {
var pageIndex;
if (S(filename).endsWith(".html")) {
pageIndex = processHTMLFile(abspath, filename);
} else if (S(filename).endsWith(".md")) {
pageIndex = processMDFile(abspath, filename);
}
return pageIndex;
};
// HTML pages: title is the filename (no front matter available);
// content is the page text with tags and punctuation stripped.
var processHTMLFile = function(abspath, filename) {
var content = grunt.file.read(abspath);
var pageName = S(filename).chompRight(".html").s;
// Drop the leading "content" prefix so href matches the served URL.
var href = S(abspath)
.chompLeft(CONTENT_PATH_PREFIX).s;
return {
title: pageName,
href: href,
content: S(content).trim().stripTags().stripPunctuation().s
};
};
// Markdown pages: parse the YAML front matter for title/tags/draft,
// and index the body text that follows it.
var processMDFile = function(abspath, filename) {
var content = grunt.file.read(abspath);
var pageIndex;
// First separate the Front Matter from the content and parse it
// NOTE(review): splitting on "---" assumes the delimiter does not
// also appear in the page body (e.g. a markdown horizontal rule);
// content[1] is the front matter, content[2] the body.
content = content.split("---");
var frontMatter;
try {
frontMatter = yaml.parse(content[1].trim());
} catch (e) {
// Log and skip files whose front matter fails to parse.
grunt.log.writeln(filename + " - " + e.message);
return;
}
var href = S(abspath).chompLeft(CONTENT_PATH_PREFIX).chompRight(".md").s;
// href for index.md files stops at the folder name
if (filename === "index.md") {
href = S(abspath).chompLeft(CONTENT_PATH_PREFIX).chompRight(filename).s;
}
// Skip drafts
if (frontMatter.draft) {
return;
}
// Build Lunr index for this page
pageIndex = {
title: frontMatter.title,
tags: frontMatter.tags,
href: href,
content: S(content[2]).trim().stripTags().stripPunctuation().s
};
return pageIndex;
};
// Serialize all entries; the client fetches this file at search time.
grunt.file.write("public/lunr.json", JSON.stringify(indexPages()));
grunt.log.ok("Index built");
});
};
```
The lunr index build can be run manually with the following in your website root:
```
grunt lunr-index
```
This will create `public/lunr.json` which contains the text content from all the website content:
```json
[{"title":"About me","href":"/about/","content":" My name is Jeff Clement I ride bikes and unicycles I make things usual
ly out of wood I make software I love computer security and privacy Contact Information EMail jeff at zeos dot ca Wire j
fry fingerprints Threema TUKDSKM6 fingerprint cb8a1e3e2ea4e8d9905d44f049efb36a GnuPG 0x76B1A823FCC65FA3 E8FF 07F8 CC8B 9
5..."}, ...]
```
## Adding Searching to the Template
I added the following "search" button that pops up a modal search window (I'm using Bootstrap):
```html
<a href="#modalSearch" data-toggle="modal" data-target="#modalSearch" style="outline: none;">
<span class="hidden-sm hidden-md hidden-lg">search</span> <span id="searchglyph" class="fas fa-search"></span>
</a>
```
And then the following modelSearch window:
```html
<div id="modalSearch" class="modal fade" role="dialog">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal">&times;</button>
<h4 class="modal-title">Search</h4>
</div>
<div class="modal-body">
<form style="padding-bottom: 10px">
<div class="input-group">
<span class="input-group-addon" id="basic-addon1"><span class="fa fa-search"></span></span>
<input type="text" id="search" class="form-control" placeholder="Search" aria-describedby="basic-addon1">
</div>
</form>
<div id="resultsPane" style="display: none;">
<h4>Results</h4>
<ul id="results">
</ul>
</div>
<div id="noResultsPane" style="display: none;">
There are no search results for this search.
</div>
<div style="font-size: 8pt; color: #888; padding-top: 10px;">
Use "*" in your search as a wildcard (i.e. "linu*")
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
```
The following JavaScript, handles the searching behaviour:
```js
var lunrIndex, pagesIndex;
function initLunr() {
// First retrieve the index file
$.getJSON("/lunr.json")
.done(function(index) {
pagesIndex = index;
// Set up lunrjs by declaring the fields we use
// Also provide their boost level for the ranking