Restructure yaml, add CI/CD pipelines

This commit is contained in:
red 2025-04-08 12:17:27 -04:00
parent fc1bf0bb9a
commit fd7d99d29a
31 changed files with 771 additions and 543 deletions

74
.woodpecker/build.yaml Normal file
View file

@ -0,0 +1,74 @@
when:
  - branch: master
    event: [push, pull_request]

clone:
  git:
    image: woodpeckerci/plugin-git
    settings:
      recursive: true
      # TODO: extend this to use an image that has our root certificate baked in
      skip-verify: true

steps:
  # Build and push the pleroma image (tagged with the commit SHA and "prod").
  pleroma:
    image: woodpeckerci/plugin-kaniko
    settings:
      registry: cr.forge.lan
      repo: darkdork.dev/pleroma
      context: pleroma
      tags:
        - ${CI_COMMIT_SHA}
        - prod
      cache: true
      # TODO: drop once the internal registry cert is trusted in the build image
      skip-tls-verify: true
      username:
        from_secret: docker_registry_username
      password:
        from_secret: docker_registry_password
    when:
      event: [pull_request, push]
    depends_on: []

  # Build and push the privoxy image.
  privoxy:
    # FIX: was "woodpecker-ci/plugin-kaniko" — the registry org is
    # "woodpeckerci" (no hyphen), as used by the pleroma step above.
    image: woodpeckerci/plugin-kaniko
    settings:
      registry: cr.forge.lan
      repo: darkdork.dev/privoxy
      context: privoxy
      tags:
        - ${CI_COMMIT_SHA}
        - prod
      cache: true
      skip-tls-verify: true
      username:
        from_secret: docker_registry_username
      password:
        from_secret: docker_registry_password
    when:
      event: [pull_request, push]
    depends_on: []

  # Build and push the tor image.
  tor:
    # FIX: same org typo as above.
    image: woodpeckerci/plugin-kaniko
    settings:
      registry: cr.forge.lan
      repo: darkdork.dev/tor
      # FIX: was "context: privoxy" (copy-paste) — the tor image must be
      # built from its own build context.
      context: tor
      tags:
        - ${CI_COMMIT_SHA}
        - prod
      cache: true
      skip-tls-verify: true
      username:
        from_secret: docker_registry_username
      password:
        from_secret: docker_registry_password
    when:
      event: [pull_request, push]
    depends_on: []

89
.woodpecker/deploy.yaml Normal file
View file

@ -0,0 +1,89 @@
when:
  - branch: master
    event: [push, pull_request]

clone:
  git:
    image: woodpeckerci/plugin-git
    settings:
      recursive: true
      # TODO: extend this to use an image that has our root certificate baked in
      skip-verify: true

steps:
  # Each step writes the kubeconfig from a secret, then applies one
  # manifest directory. Services with a ${CI_COMMIT_SHA}-tagged image
  # additionally render their deployment template through envsubst.
  darkdork.dev:
    image: cr.forge.lan/alk8s/alk8s:latest
    pull: true
    environment:
      KUBECONFIG_BASE64:
        from_secret: kubeconfig_base64
      CI_COMMIT_SHA: ${CI_COMMIT_SHA}
    commands:
      - mkdir -p ~/.kube
      - echo $KUBECONFIG_BASE64 | base64 -d > ~/.kube/config
      - pwd
      - kubectl apply -Rf manifests/darkdork.dev

  postgres:
    image: cr.forge.lan/alk8s/alk8s:latest
    pull: true
    environment:
      KUBECONFIG_BASE64:
        from_secret: kubeconfig_base64
      CI_COMMIT_SHA: ${CI_COMMIT_SHA}
    commands:
      - mkdir -p ~/.kube
      - echo $KUBECONFIG_BASE64 | base64 -d > ~/.kube/config
      - pwd
      - kubectl apply -Rf manifests/postgres

  minio:
    image: cr.forge.lan/alk8s/alk8s:latest
    pull: true
    environment:
      KUBECONFIG_BASE64:
        from_secret: kubeconfig_base64
      CI_COMMIT_SHA: ${CI_COMMIT_SHA}
    commands:
      - mkdir -p ~/.kube
      - echo $KUBECONFIG_BASE64 | base64 -d > ~/.kube/config
      - pwd
      - kubectl apply -Rf manifests/minio

  tor:
    image: cr.forge.lan/alk8s/alk8s:latest
    pull: true
    environment:
      KUBECONFIG_BASE64:
        from_secret: kubeconfig_base64
      CI_COMMIT_SHA: ${CI_COMMIT_SHA}
    commands:
      - mkdir -p ~/.kube
      - echo $KUBECONFIG_BASE64 | base64 -d > ~/.kube/config
      - pwd
      - kubectl apply -Rf manifests/tor
      - envsubst < manifests/tor/deployment.yaml.template | kubectl apply -f -

  privoxy:
    image: cr.forge.lan/alk8s/alk8s:latest
    pull: true
    environment:
      KUBECONFIG_BASE64:
        from_secret: kubeconfig_base64
      CI_COMMIT_SHA: ${CI_COMMIT_SHA}
    commands:
      - mkdir -p ~/.kube
      - echo $KUBECONFIG_BASE64 | base64 -d > ~/.kube/config
      - pwd
      - kubectl apply -Rf manifests/privoxy
      - envsubst < manifests/privoxy/deployment.yaml.template | kubectl apply -f -

  pleroma:
    image: cr.forge.lan/alk8s/alk8s:latest
    pull: true
    environment:
      KUBECONFIG_BASE64:
        from_secret: kubeconfig_base64
      CI_COMMIT_SHA: ${CI_COMMIT_SHA}
    commands:
      - mkdir -p ~/.kube
      - echo $KUBECONFIG_BASE64 | base64 -d > ~/.kube/config
      - pwd
      # FIX: was "kubectl -Rf manifests/pleroma" — the "apply" verb was
      # missing, so this command errored instead of applying the manifests.
      - kubectl apply -Rf manifests/pleroma
      - envsubst < manifests/pleroma/deployment.yaml.template | kubectl apply -f -

45
.woodpecker/pleroma.yaml Normal file
View file

@ -0,0 +1,45 @@
when:
  - branch: master
    event: [push, pull_request]

clone:
  git:
    image: woodpeckerci/plugin-git
    settings:
      recursive: true
      # TODO: extend this to use an image that has our root certificate baked in
      skip-verify: true

steps:
  # Build and push the pleroma image tagged with the commit SHA.
  publish:
    image: woodpeckerci/plugin-kaniko
    settings:
      registry: cr.forge.lan
      repo: darkdork.dev/pleroma
      context: pleroma
      tags:
        - ${CI_COMMIT_SHA}
        - prod
      cache: true
      skip-tls-verify: true
      username:
        from_secret: docker_registry_username
      password:
        from_secret: docker_registry_password
    when:
      event: [pull_request, push]

  # Render the deployment template with the freshly built tag and apply it.
  deploy:
    image: cr.forge.lan/alk8s/alk8s:latest
    pull: true
    environment:
      KUBECONFIG_BASE64:
        from_secret: kubeconfig_base64
      CI_COMMIT_SHA: ${CI_COMMIT_SHA}
    commands:
      - mkdir -p ~/.kube
      - echo $KUBECONFIG_BASE64 | base64 -d > ~/.kube/config
      - pwd
      # FIX: was "deployment.template.yaml" — the repo's template files and
      # the deploy pipeline use the "deployment.yaml.template" naming, so the
      # old path would make envsubst fail with "No such file or directory".
      - envsubst < manifests/pleroma/deployment.yaml.template | kubectl apply -f -

0
.woodpecker/privoxy Normal file
View file

0
.woodpecker/tor.yaml Normal file
View file

View file

@ -0,0 +1,41 @@
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: darkdork-dev
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: pwm@crlf.ninja
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
ingressClassName: nginx
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: darkdork-dev
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: pwm@crlf.ninja
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
ingressClassName: nginx
---

View file

@ -0,0 +1,6 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: darkdork-dev
---

View file

@ -0,0 +1,28 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-ssd
namespace: darkdork-dev
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
numberOfReplicas: "3"
staleReplicaTimeout: "2880" # 48 hours in minutes
fromBackup: ""
fsType: "xfs"
# backupTargetName: "default"
# mkfsParams: "-I 256 -b 4096 -O ^metadata_csum,^64bit"
# diskSelector: "ssd,fast"
# nodeSelector: "storage,fast"
# recurringJobSelector: '[
# {
# "name":"snap",
# "isGroup":true,
# },
# {
# "name":"backup",
# "isGroup":false,
# }
# ]'
---

View file

@ -1,131 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: postgres-init
namespace: darkdork-dev
data:
init-db.sh: |
#!/bin/bash
set -e
DB_USER=${DB_USER:-pleroma}
DB_NAME=${DB_NAME:-pleroma}
psql -U ${POSTGRES_USER:-postgres} -tc "SELECT 1 FROM pg_user WHERE usename = '$DB_USER'" | \
grep -q 1 || psql -U postgres -c "CREATE USER $DB_USER WITH ENCRYPTED PASSWORD '$DB_PASS'"
psql -U ${POSTGRES_USER:-postgres} -tc "SELECT 1 FROM pg_database WHERE datname = '$DB_NAME'" | \
grep -q 1 || psql -U postgres -c "CREATE DATABASE $DB_NAME OWNER $DB_USER"
psql -v ON_ERROR_STOP=1 --username "${POSTGRES_USER:-postgres}" --dbname "$DB_NAME" <<-EOSQL
CREATE EXTENSION IF NOT EXISTS citext;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
EOSQL
---
apiVersion: v1
kind: ConfigMap
metadata:
name: postgres-config
namespace: darkdork-dev
data:
postgresql.conf: |
# DB Version: 17
# OS Type: linux
# DB Type: web
# Total Memory (RAM): 4 GB
# Data Storage: ssd
max_connections = 200
shared_buffers = 1GB
effective_cache_size = 3GB
maintenance_work_mem = 256MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 2621kB
huge_pages = off
min_wal_size = 1GB
max_wal_size = 4GB
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-pvc
namespace: darkdork-dev
labels:
app: postgres
spec:
storageClassName: longhorn-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: postgres
namespace: darkdork-dev
spec:
ports:
- port: 5432
targetPort: 5432
protocol: TCP
selector:
app: postgres
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgres
namespace: darkdork-dev
spec:
replicas: 1
selector:
matchLabels:
app: postgres
template:
metadata:
labels:
app: postgres
spec:
imagePullSecrets:
- name: registry-credentials
containers:
- name: postgres
image: postgres:17-alpine
imagePullPolicy: Always
volumeMounts:
- name: postgres-data-volume
mountPath: /var/lib/postgresql/data
- name: postgres-init-volume
mountPath: /docker-entrypoint-initdb.d
- name: postgres-config-volume
mountPath: /etc/postgresql/postgresql.conf
subPath: postgresql.conf
ports:
- containerPort: 5432
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres
key: postgres-password
- name: DB_PASS
valueFrom:
secretKeyRef:
name: postgres
key: postgres-password
volumes:
- name: postgres-data-volume
persistentVolumeClaim:
claimName: postgres-pvc
- name: postgres-init-volume
configMap:
name: postgres-init
defaultMode: 0755
- name: postgres-config-volume
configMap:
name: postgres-config

View file

@ -1,77 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: privoxy
namespace: darkdork-dev
spec:
ports:
- port: 8118
targetPort: 8118
protocol: TCP
selector:
app: privoxy
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: privoxy
namespace: darkdork-dev
spec:
replicas: 1
selector:
matchLabels:
app: privoxy
template:
metadata:
labels:
app: privoxy
spec:
imagePullSecrets:
- name: registry-credentials
containers:
- name: privoxy
image: cr.forge.lan/darkdork-dev/privoxy
imagePullPolicy: Always
ports:
- containerPort: 8118
volumeMounts:
- name: privoxy-config
mountPath: /etc/privoxy/config
subPath: config
volumes:
- name: privoxy-config
configMap:
name: privoxy-config
---
apiVersion: v1
kind: ConfigMap
metadata:
name: privoxy-config
namespace: darkdork-dev
data:
config: |
confdir /etc/privoxy
logdir /var/log/privoxy
logfile privoxy.log
listen-address 0.0.0.0:8118
toggle 0
enable-remote-toggle 0
enable-remote-http-toggle 0
enable-edit-actions 0
enforce-blocks 0
buffer-limit 4096
enable-proxy-authentication-forwarding 0
forwarded-connect-retries 0
accept-intercepted-requests 0
allow-cgi-request-crunching 0
split-large-forms 0
keep-alive-timeout 5
tolerate-pipelining 1
socket-timeout 300
# debug 13313
forward-socks5 / 10.8.0.1:1080 .
forward-socks5t .onion tor:9050 .
forward minio/ .

View file

@ -1,38 +1,4 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: minio-pvc
namespace: darkdork-dev
labels:
app: minio
spec:
storageClassName: longhorn-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage:
10Gi
---
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: darkdork-dev
spec:
ports:
- port: 80
name: minio
targetPort: 9000
protocol: TCP
- port: 9001
name: minio-admin
targetPort: 9001
protocol: TCP
selector:
app: minio
---
apiVersion: apps/v1
kind: Deployment
metadata:
@ -72,4 +38,5 @@ spec:
volumes:
- name: minio-data-volume
persistentVolumeClaim:
claimName: minio-pvc
claimName: minio-pvc
---

View file

@ -0,0 +1,28 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio
namespace: darkdork-dev
annotations:
cert-manager.io/issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/rewrite-target: /pleroma.darkdork.dev/$1
nginx.ingress.kubernetes.io/proxy-body-size: 64m
spec:
ingressClassName: nginx
tls:
- hosts:
- media.darkdork.dev
secretName: tls-secret-media
rules:
- host: media.darkdork.dev
http:
paths:
- path: /(.+)
pathType: ImplementationSpecific
backend:
service:
name: minio
port:
number: 80
---

17
manifests/minio/pvc.yaml Normal file
View file

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: minio-pvc
namespace: darkdork-dev
labels:
app: minio
spec:
storageClassName: longhorn-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage:
10Gi
---

View file

@ -0,0 +1,19 @@
---
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: darkdork-dev
spec:
ports:
- port: 80
name: minio
targetPort: 9000
protocol: TCP
- port: 9001
name: minio-admin
targetPort: 9001
protocol: TCP
selector:
app: minio
---

View file

@ -1,126 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: darkdork-dev
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-ssd
namespace: darkdork-dev
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
numberOfReplicas: "3"
staleReplicaTimeout: "2880" # 48 hours in minutes
fromBackup: ""
fsType: "xfs"
# backupTargetName: "default"
# mkfsParams: "-I 256 -b 4096 -O ^metadata_csum,^64bit"
# diskSelector: "ssd,fast"
# nodeSelector: "storage,fast"
# recurringJobSelector: '[
# {
# "name":"snap",
# "isGroup":true,
# },
# {
# "name":"backup",
# "isGroup":false,
# }
# ]'
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pleroma
namespace: darkdork-dev
annotations:
cert-manager.io/issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/proxy-body-size: 64m
spec:
ingressClassName: nginx
tls:
- hosts:
- darkdork.dev
secretName: tls-secret
rules:
- host: darkdork.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: pleroma
port:
number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio
namespace: darkdork-dev
annotations:
cert-manager.io/issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/rewrite-target: /pleroma.darkdork.dev/$1
nginx.ingress.kubernetes.io/proxy-body-size: 64m
spec:
ingressClassName: nginx
tls:
- hosts:
- media.darkdork.dev
secretName: tls-secret-media
rules:
- host: media.darkdork.dev
http:
paths:
- path: /(.+)
pathType: ImplementationSpecific
backend:
service:
name: minio
port:
number: 80
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: darkdork-dev
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: pwm@crlf.ninja
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
ingressClassName: nginx
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: darkdork-dev
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: pwm@crlf.ninja
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
ingressClassName: nginx

View file

@ -1,155 +1,5 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pleroma-pvc
namespace: darkdork-dev
labels:
app: pleroma
spec:
storageClassName: longhorn-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage:
10Gi
---
apiVersion: v1
kind: Service
metadata:
name: pleroma
namespace: darkdork-dev
spec:
ports:
- port: 80
targetPort: 4000
protocol: TCP
selector:
app: pleroma
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: pleroma
namespace: darkdork-dev
spec:
replicas: 1
selector:
matchLabels:
app: pleroma
template:
metadata:
labels:
app: pleroma
spec:
imagePullSecrets:
- name: registry-credentials
initContainers:
- name: pleroma-static-files
image: cr.forge.lan/darkdork-dev/pleroma
command: [ "sh", "-c", "mkdir -p ${DATA}/uploads && mkdir -p ${DATA}/static && cp -rf /static-files/* ${DATA}/static && chown -R 1000:1000 /var/lib/pleroma" ]
- name: pleroma-database-wait
image: cr.forge.lan/darkdork-dev/pleroma
command: [ "sh", "-c", "while ! pg_isready -U ${DB_USER} -d postgres://${DB_HOST}:${DB_PORT}/${DB_NAME} -t 1; do sleep 1s; done;" ]
env:
- name: DB_HOST
value: postgres
- name: DB_NAME
value: pleroma
- name: DB_USER
value: pleroma
- name: pleroma-migrate
image: cr.forge.lan/darkdork-dev/pleroma
command: [ "sh", "-c", "exec", "${HOME}/bin/pleroma_ctl migrate" ]
containers:
- name: pleroma
image: cr.forge.lan/darkdork-dev/pleroma
imagePullPolicy: Always
ports:
- containerPort: 4000
env:
- name: DOMAIN
value: darkdork.dev
- name: INSTANCE_NAME
value: DarkDork.dev
- name: ADMIN_EMAIL
value: pwm@crlf.ninja
- name: NOTIFY_EMAIL
value: pleroma@crlf.ninja
- name: REGISTRATIONS_OPEN
value: "false"
- name: INVITES_ENABLED
value: "true"
- name: SECRET_KEY_BASE
valueFrom:
secretKeyRef:
name: pleroma
key: secret-key-base
- name: WEB_PUSH_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: web-push-public-key
- name: WEB_PUSH_PRIVATE_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: web-push-private-key
- name: DEFAULT_SIGNER
valueFrom:
secretKeyRef:
name: pleroma
key: default-signer
- name: MEDIA_URL
value: "https://media.darkdork.dev"
- name: S3_BUCKET
value: pleroma.darkdork.dev
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: minio-access-key
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: minio-secret-key
- name: S3_SCHEME
value: "http://"
- name: S3_HOST
value: minio
- name: S3_PORT
value: "80"
- name: DB_HOST
value: postgres
- name: DB_NAME
value: pleroma
- name: DB_USER
value: pleroma
- name: DB_PASS
valueFrom:
secretKeyRef:
name: postgres
key: postgres-password
volumeMounts:
- name: pleroma-data-volume
mountPath: /var/lib/pleroma
- name: pleroma-config-volume
mountPath: /etc/pleroma/config.exs
subPath: config.exs
volumes:
- name: pleroma-data-volume
persistentVolumeClaim:
claimName: pleroma-pvc
- name: pleroma-config-volume
configMap:
name: pleroma-config
defaultMode: 0640 # Pleroma is picky about config file permissions.
securityContext:
fsGroup: 1000 # Ensures plperoma can still read the config file
---
apiVersion: v1
kind: ConfigMap
metadata:
name: pleroma-config
@ -409,5 +259,8 @@ data:
{"cunny.beauty", "Chomo: Type A (Common)"},
{"burggit.moe", "Chomo: Type A (Common)"},
{"mostr.pub", "NNNNGGGGGGHHHHH I KILL YOU"},
{"furville.drinkanddrive.africa", "N/A"}
]
{"furville.drinkanddrive.africa", "N/A"},
{"cutiepaws.org", "Chomo: Type B (Furry)"},
{"baise-moi.top", "Chomo: Type A (Common)"}
]
---

View file

@ -0,0 +1,122 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: pleroma
namespace: darkdork-dev
spec:
replicas: 1
selector:
matchLabels:
app: pleroma
template:
metadata:
labels:
app: pleroma
spec:
imagePullSecrets:
- name: registry-credentials
initContainers:
- name: pleroma-static-files
image: cr.forge.lan/darkdork-dev/pleroma:${CI_COMMIT_SHA}
command: [ "sh", "-c", "mkdir -p ${DATA}/uploads && mkdir -p ${DATA}/static && cp -rf /static-files/* ${DATA}/static && chown -R 1000:1000 /var/lib/pleroma" ]
- name: pleroma-database-wait
image: cr.forge.lan/darkdork-dev/pleroma:${CI_COMMIT_SHA}
command: [ "sh", "-c", "while ! pg_isready -U ${DB_USER} -d postgres://${DB_HOST}:${DB_PORT}/${DB_NAME} -t 1; do sleep 1s; done;" ]
env:
- name: DB_HOST
value: postgres
- name: DB_NAME
value: pleroma
- name: DB_USER
value: pleroma
- name: pleroma-migrate
image: cr.forge.lan/darkdork-dev/pleroma:${CI_COMMIT_SHA}
command: [ "sh", "-c", "exec ${HOME}/bin/pleroma_ctl migrate" ]  # FIX: script and argument were separate list items, so `sh -c` ran the bare word "exec" (a no-op) and the migration never executed
containers:
- name: pleroma
image: cr.forge.lan/darkdork-dev/pleroma:${CI_COMMIT_SHA}
imagePullPolicy: Always
ports:
- containerPort: 4000
env:
- name: DOMAIN
value: darkdork.dev
- name: INSTANCE_NAME
value: DarkDork.dev
- name: ADMIN_EMAIL
value: pwm@crlf.ninja
- name: NOTIFY_EMAIL
value: pleroma@crlf.ninja
- name: REGISTRATIONS_OPEN
value: "false"
- name: INVITES_ENABLED
value: "true"
- name: SECRET_KEY_BASE
valueFrom:
secretKeyRef:
name: pleroma
key: secret-key-base
- name: WEB_PUSH_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: web-push-public-key
- name: WEB_PUSH_PRIVATE_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: web-push-private-key
- name: DEFAULT_SIGNER
valueFrom:
secretKeyRef:
name: pleroma
key: default-signer
- name: MEDIA_URL
value: "https://media.darkdork.dev"
- name: S3_BUCKET
value: pleroma.darkdork.dev
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: minio-access-key
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: pleroma
key: minio-secret-key
- name: S3_SCHEME
value: "http://"
- name: S3_HOST
value: minio
- name: S3_PORT
value: "80"
- name: DB_HOST
value: postgres
- name: DB_NAME
value: pleroma
- name: DB_USER
value: pleroma
- name: DB_PASS
valueFrom:
secretKeyRef:
name: postgres
key: postgres-password
volumeMounts:
- name: pleroma-data-volume
mountPath: /var/lib/pleroma
- name: pleroma-config-volume
mountPath: /etc/pleroma/config.exs
subPath: config.exs
volumes:
- name: pleroma-data-volume
persistentVolumeClaim:
claimName: pleroma-pvc
- name: pleroma-config-volume
configMap:
name: pleroma-config
defaultMode: 0640 # Pleroma is picky about config file permissions.
securityContext:
fsGroup: 1000 # Ensures plperoma can still read the config file
---

View file

@ -0,0 +1,27 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pleroma
namespace: darkdork-dev
annotations:
cert-manager.io/issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/proxy-body-size: 64m
spec:
ingressClassName: nginx
tls:
- hosts:
- darkdork.dev
secretName: tls-secret
rules:
- host: darkdork.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: pleroma
port:
number: 80
---

View file

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pleroma-pvc
namespace: darkdork-dev
labels:
app: pleroma
spec:
storageClassName: longhorn-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage:
10Gi
---

View file

@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
name: pleroma
namespace: darkdork-dev
spec:
ports:
- port: 80
targetPort: 4000
protocol: TCP
selector:
app: pleroma
---

View file

@ -0,0 +1,49 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: postgres-init
namespace: darkdork-dev
data:
init-db.sh: |
#!/bin/bash
set -e
DB_USER=${DB_USER:-pleroma}
DB_NAME=${DB_NAME:-pleroma}
psql -U ${POSTGRES_USER:-postgres} -tc "SELECT 1 FROM pg_user WHERE usename = '$DB_USER'" | \
grep -q 1 || psql -U postgres -c "CREATE USER $DB_USER WITH ENCRYPTED PASSWORD '$DB_PASS'"
psql -U ${POSTGRES_USER:-postgres} -tc "SELECT 1 FROM pg_database WHERE datname = '$DB_NAME'" | \
grep -q 1 || psql -U postgres -c "CREATE DATABASE $DB_NAME OWNER $DB_USER"
psql -v ON_ERROR_STOP=1 --username "${POSTGRES_USER:-postgres}" --dbname "$DB_NAME" <<-EOSQL
CREATE EXTENSION IF NOT EXISTS citext;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
EOSQL
---
apiVersion: v1
kind: ConfigMap
metadata:
name: postgres-config
namespace: darkdork-dev
data:
postgresql.conf: |
# DB Version: 17
# OS Type: linux
# DB Type: web
# Total Memory (RAM): 4 GB
# Data Storage: ssd
max_connections = 200
shared_buffers = 1GB
effective_cache_size = 3GB
maintenance_work_mem = 256MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 2621kB
huge_pages = off
min_wal_size = 1GB
max_wal_size = 4GB
---

View file

@ -0,0 +1,55 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgres
namespace: darkdork-dev
spec:
replicas: 1
selector:
matchLabels:
app: postgres
template:
metadata:
labels:
app: postgres
spec:
imagePullSecrets:
- name: registry-credentials
containers:
- name: postgres
image: postgres:17-alpine
imagePullPolicy: Always
volumeMounts:
- name: postgres-data-volume
mountPath: /var/lib/postgresql/data
- name: postgres-init-volume
mountPath: /docker-entrypoint-initdb.d
- name: postgres-config-volume
mountPath: /etc/postgresql/postgresql.conf
subPath: postgresql.conf
ports:
- containerPort: 5432
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres
key: postgres-password
- name: DB_PASS
valueFrom:
secretKeyRef:
name: postgres
key: postgres-password
volumes:
- name: postgres-data-volume
persistentVolumeClaim:
claimName: postgres-pvc
- name: postgres-init-volume
configMap:
name: postgres-init
defaultMode: 0755
- name: postgres-config-volume
configMap:
name: postgres-config
---

View file

@ -0,0 +1,16 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-pvc
namespace: darkdork-dev
labels:
app: postgres
spec:
storageClassName: longhorn-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---

View file

@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
name: postgres
namespace: darkdork-dev
spec:
ports:
- port: 5432
targetPort: 5432
protocol: TCP
selector:
app: postgres
---

View file

@ -0,0 +1,31 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: privoxy-config
namespace: darkdork-dev
data:
config: |
confdir /etc/privoxy
logdir /var/log/privoxy
logfile privoxy.log
listen-address 0.0.0.0:8118
toggle 0
enable-remote-toggle 0
enable-remote-http-toggle 0
enable-edit-actions 0
enforce-blocks 0
buffer-limit 4096
enable-proxy-authentication-forwarding 0
forwarded-connect-retries 0
accept-intercepted-requests 0
allow-cgi-request-crunching 0
split-large-forms 0
keep-alive-timeout 5
tolerate-pipelining 1
socket-timeout 300
# debug 13313
forward-socks5 / 10.8.0.1:1080 .
forward-socks5t .onion tor:9050 .
forward minio/ .

View file

@ -0,0 +1,33 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: privoxy
  namespace: darkdork-dev
spec:
  replicas: 1
  selector:
    matchLabels:
      app: privoxy
  template:
    metadata:
      labels:
        app: privoxy
    spec:
      imagePullSecrets:
        - name: registry-credentials
      containers:
        - name: privoxy
          # FIX: was ${CI_COMMIT_HASH}. The build pipeline tags images with
          # ${CI_COMMIT_SHA}, and the deploy step exports only CI_COMMIT_SHA
          # to envsubst — so CI_COMMIT_HASH expanded to an empty string and
          # produced an invalid image reference ("...privoxy:").
          image: cr.forge.lan/darkdork-dev/privoxy:${CI_COMMIT_SHA}
          imagePullPolicy: Always
          ports:
            - containerPort: 8118
          volumeMounts:
            - name: privoxy-config
              mountPath: /etc/privoxy/config
              subPath: config
      volumes:
        - name: privoxy-config
          configMap:
            name: privoxy-config
---

View file

@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
name: privoxy
namespace: darkdork-dev
spec:
ports:
- port: 8118
targetPort: 8118
protocol: TCP
selector:
app: privoxy
---

View file

@ -0,0 +1,10 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: tor-config
namespace: darkdork-dev
data:
torrc: |
SocksPort 0.0.0.0:9050
---

View file

@ -1,17 +1,4 @@
---
apiVersion: v1
kind: Service
metadata:
name: tor
namespace: darkdork-dev
spec:
ports:
- port: 9050
targetPort: 9050
protocol: TCP
selector:
app: tor
---
apiVersion: apps/v1
kind: Deployment
metadata:
@ -31,7 +18,7 @@ spec:
- name: registry-credentials
containers:
- name: tor
image: cr.forge.lan/darkdork-dev/tor
image: cr.forge.lan/darkdork-dev/tor:${CI_COMMIT_SHA}  # FIX: Woodpecker exports CI_COMMIT_SHA (the tag the build pipeline pushes); CI_COMMIT_HASH is unset and envsubst would render an empty tag
imagePullPolicy: Always
ports:
- containerPort: 8118
@ -44,11 +31,3 @@ spec:
configMap:
name: tor-config
---
apiVersion: v1
kind: ConfigMap
metadata:
name: tor-config
namespace: darkdork-dev
data:
torrc: |
SocksPort 0.0.0.0:9050

View file

@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
name: tor
namespace: darkdork-dev
spec:
ports:
- port: 9050
targetPort: 9050
protocol: TCP
selector:
app: tor
---