From 8e2a5236ac2c900b2aff80cf21f59a807518acf9 Mon Sep 17 00:00:00 2001
From: continuist
Date: Sat, 20 Sep 2025 21:56:32 -0400
Subject: [PATCH] Fix nginx issues

* Render nginx.conf in CI and copy it to the host without `podman unshare`.
* Restrict envsubst to ${PROD_FRONTEND_PORT}/${PROD_BACKEND_PORT}: an
  unrestricted envsubst also substitutes (and blanks out) nginx's own
  runtime variables such as $host, $scheme and $proxy_add_x_forwarded_for.
* Try a zero-downtime `nginx -s reload` first and fall back to a container
  restart. The fallback is gated on `steps.reload.outcome == 'failure'`
  because `continue-on-error: true` keeps the job status green, so a plain
  `if: failure()` would never trigger.
* Recreate the pod from the rendered manifest and verify /healthz.
---
 .forgejo/workflows/ci.yml | 51 +++++++++++++++++++++++++++++----------
 nginx/nginx.conf          | 13 ++++++++----
 2 files changed, 47 insertions(+), 17 deletions(-)

diff --git a/.forgejo/workflows/ci.yml b/.forgejo/workflows/ci.yml
index 6053135..a941d8c 100644
--- a/.forgejo/workflows/ci.yml
+++ b/.forgejo/workflows/ci.yml
@@ -240,29 +240,54 @@ jobs:
           --password-stdin \
           "${{ secrets.REGISTRY_HOST }}"
 
-      - name: Pull production images
+      - name: Pull production images (optional but faster on play)
         run: |
           podman --remote pull "$REGISTRY_HOST/$APP_NAME/sharenet-backend-api-postgres:$IMAGE_TAG"
           podman --remote pull "$REGISTRY_HOST/$APP_NAME/sharenet-frontend:$IMAGE_TAG"
 
-      - name: Render nginx.conf and put on host (no unshare)
+      # RENDER nginx.conf FROM REPO AND COPY TO HOST (no unshare)
+      - name: Render nginx.conf and write to host
         run: |
           set -euo pipefail
-          apk add --no-cache gettext >/dev/null  # envsubst
-          # Render template locally in the job container
-          envsubst < nginx/nginx.conf > /tmp/nginx.conf
-          # Write it to the host via a remote Podman helper container.
-          # Run as uid:gid 1001:1001 so writes match prod-service’s ownership.
+          apk add --no-cache gettext >/dev/null   # provides envsubst
+          # Render with the CI env (PROD_* vars). Restrict substitution to the
+          # PROD_* ports: an unrestricted envsubst would also blank out nginx's
+          # own runtime variables ($host, $scheme, $proxy_add_x_forwarded_for).
+          envsubst '${PROD_FRONTEND_PORT} ${PROD_BACKEND_PORT}' < nginx/nginx.conf > /tmp/nginx.conf
+          # Copy to host via remote Podman bind-mount; keep prod-service uid/gid
           podman --remote run --rm -i \
             --userns=keep-id \
             -v /opt/sharenet/nginx:/host-nginx:rw \
             alpine:3.20 sh -c 'install -D -m 0644 /dev/stdin /host-nginx/nginx.conf' \
             < /tmp/nginx.conf
 
-      - name: Install envsubst (Alpine)
-        run: apk add --no-cache gettext
-
-      - name: Deploy production pod
+      # TRY ZERO-DOWNTIME RELOAD FIRST
+      - name: Reload in-pod Nginx (or restart on failure)
+        id: reload
+        continue-on-error: true
         run: |
-          # Process the pod template with environment variables
-          envsubst < deploy/prod-pod.yml | podman --remote kube play -
\ No newline at end of file
+          set -euo pipefail
+          podman --remote exec sharenet-production-pod-nginx nginx -t
+          podman --remote exec sharenet-production-pod-nginx nginx -s reload
+
+      # continue-on-error keeps the job status green even when the step fails,
+      # so `if: failure()` would never fire; gate on the step outcome instead.
+      - name: Fallback restart Nginx container if reload failed
+        if: steps.reload.outcome == 'failure'
+        run: |
+          set -euo pipefail
+          podman --remote restart sharenet-production-pod-nginx
+
+      # (Re)APPLY THE POD (ensures new images/config picked up)
+      - name: Recreate pod (down & play)
+        run: |
+          set -euo pipefail
+          podman --remote kube down sharenet-production-pod || true
+          # Render the pod manifest (uses same $ENV as before)
+          envsubst < deploy/prod-pod.yml | podman --remote kube play -
+
+      # VERIFY
+      - name: Verify in-pod nginx is healthy
+        run: |
+          set -euo pipefail
+          curl -sS -D- http://127.0.0.1:18080/healthz
diff --git a/nginx/nginx.conf b/nginx/nginx.conf
index 67fdd53..5d16e57 100644
--- a/nginx/nginx.conf
+++ b/nginx/nginx.conf
@@ -1,6 +1,7 @@
 user nginx;
 worker_processes auto;
 pid /var/run/nginx.pid;
+
 events { worker_connections 1024; }
 
 http {
@@ -20,17 +21,21 @@ http {
     # frontend default
     location / {
-      proxy_pass http://127.0.0.1:${PROD_FRONTEND_PORT};
+      proxy_http_version 1.1;
       proxy_set_header Host $host;
-      proxy_set_header X-Forwarded-For $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
       proxy_set_header X-Forwarded-Proto $scheme;
+      proxy_set_header Connection "";
+      proxy_pass http://127.0.0.1:${PROD_FRONTEND_PORT};
     }
 
     # backend API
     location /api/ {
-      proxy_pass http://127.0.0.1:${PROD_BACKEND_PORT}/;
+      proxy_http_version 1.1;
       proxy_set_header Host $host;
-      proxy_set_header X-Forwarded-For $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
       proxy_set_header X-Forwarded-Proto $scheme;
+      proxy_set_header Connection "";
+      proxy_pass http://127.0.0.1:${PROD_BACKEND_PORT}/;
     }
   }
 }
-- 
2.43.0