harden invite abuse controls and deployment smoke checks

This commit is contained in:
Nico 2026-02-14 11:52:18 -08:00
parent 3ee1a87d58
commit 4d09d7e5b4
12 changed files with 273 additions and 6 deletions

View File

@ -5,7 +5,7 @@ import { fileURLToPath } from "node:url";
import dotenv from "dotenv"; import dotenv from "dotenv";
import getPool from "../lib/server/db"; import getPool from "../lib/server/db";
import { ApiError } from "../lib/server/errors"; import { ApiError } from "../lib/server/errors";
import { enforceAuthRateLimit, enforceUserWriteRateLimit } from "../lib/server/rate-limit"; import { enforceAuthRateLimit, enforceIpRateLimit, enforceUserWriteRateLimit } from "../lib/server/rate-limit";
const __filename = fileURLToPath(import.meta.url); const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename); const __dirname = path.dirname(__filename);
@ -99,3 +99,41 @@ test("user write rate limit blocks when threshold is exceeded", async t => {
]); ]);
} }
}); });
test("ip rate limit blocks when threshold is exceeded", async t => {
  if (!hasDb) {
    t.skip("DATABASE_URL not set");
    return;
  }
  if (envLoaded.error) t.diagnostic(String(envLoaded.error));
  await ensureRateLimitTable();

  const pool = getPool();
  // Unique (ip, scope) pair per run so repeated or parallel runs cannot collide.
  const ip = `203.0.113.${Math.floor(Math.random() * 200)}`;
  const scope = `test_ip_scope_${Date.now()}`;
  try {
    // First call consumes the single allowed slot for this window...
    await enforceIpRateLimit({ scope, ip, limit: 1, windowMs: 60_000 });
    // ...so an identical second call must reject with RATE_LIMITED.
    await assert.rejects(
      () => enforceIpRateLimit({ scope, ip, limit: 1, windowMs: 60_000 }),
      (error: unknown) => error instanceof ApiError && error.code === "RATE_LIMITED"
    );
  } finally {
    // Cleanup must mirror the key layout built by enforceIpRateLimit.
    // NOTE(review): assumes normalizeSegment only lowercases these values —
    // confirm it does not strip underscores/dots, or this delete will miss.
    const normalizedScope = scope.toLowerCase();
    const normalizedIp = ip.toLowerCase();
    await pool.query("delete from rate_limits where key = $1", [
      `ip:scope:${normalizedScope}:ip:${normalizedIp}`
    ]);
  }
});

View File

@ -3,10 +3,12 @@ import { requireSessionUser } from "@/lib/server/session";
import { joinGroup } from "@/lib/server/groups"; import { joinGroup } from "@/lib/server/groups";
import { toErrorResponse } from "@/lib/server/errors"; import { toErrorResponse } from "@/lib/server/errors";
import { getRequestMeta } from "@/lib/server/request"; import { getRequestMeta } from "@/lib/server/request";
import { enforceIpRateLimit } from "@/lib/server/rate-limit";
export async function POST(req: Request) { export async function POST(req: Request) {
const { requestId } = await getRequestMeta(); const { requestId, ip } = await getRequestMeta();
try { try {
await enforceIpRateLimit({ scope: "groups:join:ip", ip, limit: 60 });
const user = await requireSessionUser(); const user = await requireSessionUser();
const body = await req.json().catch(() => null); const body = await req.json().catch(() => null);
const inviteCode = String(body?.inviteCode || "").trim().toUpperCase(); const inviteCode = String(body?.inviteCode || "").trim().toUpperCase();

View File

@ -3,10 +3,12 @@ import { getSessionUser, requireSessionUser } from "@/lib/server/session";
import { apiError, toErrorResponse } from "@/lib/server/errors"; import { apiError, toErrorResponse } from "@/lib/server/errors";
import { getRequestMeta } from "@/lib/server/request"; import { getRequestMeta } from "@/lib/server/request";
import { acceptInviteLink, getInviteLinkSummaryByToken, getInviteViewerStatus } from "@/lib/server/group-invites"; import { acceptInviteLink, getInviteLinkSummaryByToken, getInviteViewerStatus } from "@/lib/server/group-invites";
import { enforceIpRateLimit } from "@/lib/server/rate-limit";
export async function GET(_: Request, context: { params: Promise<{ token: string }> }) { export async function GET(_: Request, context: { params: Promise<{ token: string }> }) {
const { requestId } = await getRequestMeta(); const { requestId, ip } = await getRequestMeta();
try { try {
await enforceIpRateLimit({ scope: "invite-links:get:ip", ip, limit: 120 });
const { token } = await context.params; const { token } = await context.params;
const normalized = String(token || "").trim(); const normalized = String(token || "").trim();
if (!normalized) apiError("INVITE_NOT_FOUND"); if (!normalized) apiError("INVITE_NOT_FOUND");
@ -28,6 +30,7 @@ export async function GET(_: Request, context: { params: Promise<{ token: string
export async function POST(_: Request, context: { params: Promise<{ token: string }> }) { export async function POST(_: Request, context: { params: Promise<{ token: string }> }) {
const { requestId, ip, userAgent } = await getRequestMeta(); const { requestId, ip, userAgent } = await getRequestMeta();
try { try {
await enforceIpRateLimit({ scope: "invite-links:accept:ip", ip, limit: 60 });
const user = await requireSessionUser(); const user = await requireSessionUser();
const { token } = await context.params; const { token } = await context.params;
const normalized = String(token || "").trim(); const normalized = String(token || "").trim();

View File

@ -106,7 +106,6 @@ export default function TagInput({ label, labelAction, tags, suggestions, remove
function handleKeyDown(event: React.KeyboardEvent<HTMLInputElement>) { function handleKeyDown(event: React.KeyboardEvent<HTMLInputElement>) {
if (false && event.key === "Backspace" && !value && tags.length) { if (false && event.key === "Backspace" && !value && tags.length) {
console.log("Backspace pressed with empty input, removing last tag");
event.preventDefault(); event.preventDefault();
onToggleTag?.(tags[tags.length - 1]); onToggleTag?.(tags[tags.length - 1]);
return; return;

View File

@ -123,3 +123,14 @@ export async function enforceUserWriteRateLimit(input: { userId: number; scope:
windowMs: input.windowMs ?? (15 * 60 * 1000) windowMs: input.windowMs ?? (15 * 60 * 1000)
}); });
} }
/**
 * Per-IP rate limiter: one bucket per (scope, ip) pair.
 * A missing/empty ip is bucketed under "unknown" rather than bypassing the
 * limit, so clients that strip forwarding headers still share one budget.
 * Defaults: 120 requests per 15-minute window.
 */
export async function enforceIpRateLimit(input: { scope: string; ip?: string | null; limit?: number; windowMs?: number }) {
  const scopeSegment = normalizeSegment(input.scope);
  const ipSegment = normalizeSegment(String(input.ip || "unknown"));
  const key = `ip:scope:${scopeSegment}:ip:${ipSegment}`;
  await consumeRateLimit({
    key,
    scope: scopeSegment,
    limit: input.limit ?? 120,
    windowMs: input.windowMs ?? (15 * 60 * 1000) // 15 minutes
  });
}

View File

@ -10,9 +10,17 @@ function parseForwardedIp(value: string | null): string | null {
return first.slice(0, 64); return first.slice(0, 64);
} }
// Sanitize an inbound X-Request-Id before propagating it: trim, cap at 96
// chars, and keep only a conservative charset so upstream-supplied IDs cannot
// smuggle log-breaking or oversized content. Returns null when nothing
// usable remains (caller then generates a fresh ID).
function sanitizeRequestId(value: string | null) {
  const candidate = value?.trim();
  if (!candidate) return null;
  const cleaned = candidate.slice(0, 96).replace(/[^a-zA-Z0-9._:-]/g, "");
  return cleaned.length > 0 ? cleaned : null;
}
export async function getRequestMeta() { export async function getRequestMeta() {
const headerStore = await headers(); const headerStore = await headers();
const forwardedRequestId = headerStore.get("x-request-id")?.trim(); const forwardedRequestId = sanitizeRequestId(headerStore.get("x-request-id"));
const requestId = forwardedRequestId || createRequestId(); const requestId = forwardedRequestId || createRequestId();
const ip = parseForwardedIp(headerStore.get("x-forwarded-for")) || parseForwardedIp(headerStore.get("x-real-ip")); const ip = parseForwardedIp(headerStore.get("x-forwarded-for")) || parseForwardedIp(headerStore.get("x-real-ip"));
return { return {

View File

@ -1,5 +1,23 @@
limit_req_zone $binary_remote_addr zone=fiddy_auth:10m rate=10r/m; limit_req_zone $binary_remote_addr zone=fiddy_auth:10m rate=10r/m;
limit_req_zone $binary_remote_addr zone=fiddy_write:10m rate=60r/m; limit_req_zone $binary_remote_addr zone=fiddy_write:10m rate=60r/m;
limit_conn_zone $binary_remote_addr zone=fiddy_conn:10m;
log_format fiddy_json escape=json
'{'
'"time":"$time_iso8601",'
'"remote_addr":"$remote_addr",'
'"request_id":"$request_id",'
'"request_method":"$request_method",'
'"uri":"$request_uri",'
'"status":$status,'
'"bytes_sent":$body_bytes_sent,'
'"request_time":$request_time,'
'"upstream_addr":"$upstream_addr",'
'"upstream_status":"$upstream_status",'
'"upstream_response_time":"$upstream_response_time",'
'"http_referer":"$http_referer",'
'"http_user_agent":"$http_user_agent"'
'}';
upstream fiddy_web { upstream fiddy_web {
server 127.0.0.1:3000; server 127.0.0.1:3000;
@ -17,6 +35,9 @@ server {
listen 443 ssl http2; listen 443 ssl http2;
listen [::]:443 ssl http2; listen [::]:443 ssl http2;
server_name fiddy.example.com; server_name fiddy.example.com;
server_tokens off;
access_log /var/log/nginx/fiddy-access.log fiddy_json;
error_log /var/log/nginx/fiddy-error.log warn;
ssl_certificate /etc/letsencrypt/live/fiddy.example.com/fullchain.pem; ssl_certificate /etc/letsencrypt/live/fiddy.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/fiddy.example.com/privkey.pem; ssl_certificate_key /etc/letsencrypt/live/fiddy.example.com/privkey.pem;
@ -30,6 +51,7 @@ server {
client_header_timeout 15s; client_header_timeout 15s;
keepalive_timeout 30s; keepalive_timeout 30s;
send_timeout 30s; send_timeout 30s;
limit_conn fiddy_conn 50;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
add_header X-Content-Type-Options "nosniff" always; add_header X-Content-Type-Options "nosniff" always;

View File

@ -9,6 +9,27 @@ clients:
- url: http://loki:3100/loki/api/v1/push - url: http://loki:3100/loki/api/v1/push
scrape_configs: scrape_configs:
- job_name: nginx
static_configs:
- targets:
- localhost
labels:
job: nginx
__path__: /var/log/nginx/*.log
pipeline_stages:
- json:
expressions:
request_id: request_id
request_method: request_method
uri: uri
status: status
request_time: request_time
upstream_status: upstream_status
- labels:
job:
request_method:
status:
- job_name: system - job_name: system
static_configs: static_configs:
- targets: - targets:

View File

@ -98,6 +98,21 @@ Primary outcomes:
- `npm run lint`: pass (warnings only; no errors). - `npm run lint`: pass (warnings only; no errors).
- `npm test`: pass (`25 passed`, `1 skipped`). - `npm test`: pass (`25 passed`, `1 skipped`).
- `npm run build`: pass. - `npm run build`: pass.
- Added request-id input sanitization in `apps/web/lib/server/request.ts` to prevent malformed inbound ID propagation.
- Added nginx hardening/observability updates in `docker/nginx/fiddy.conf`:
- JSON access log format with request/upstream latency fields.
- `server_tokens off`, explicit access/error logs, and connection cap.
- Added nginx log parsing pipeline in `docker/observability/promtail-config.yml` for Loki ingestion (`job="nginx"`).
- Rewrote `docs/public-launch-runbook.md` in clean ASCII and expanded proxy/observability checks.
- Added `docs/06_SECURITY_REVIEW.md` with app/data/user/host findings and launch checklist.
- Added per-IP abuse controls for invite/join surfaces:
- `apps/web/lib/server/rate-limit.ts` adds `enforceIpRateLimit`.
- Applied in `apps/web/app/api/groups/join/route.ts`.
- Applied in `apps/web/app/api/invite-links/[token]/route.ts` (`GET` + `POST`).
- Added regression coverage for IP limiter in `apps/web/__tests__/rate-limit.test.ts`.
- Added operational smoke tooling for deploy/rollback validation:
- `scripts/smoke-public-launch.sh` checks health endpoints, `X-Request-Id`, and `request_id` response fields.
- Expanded `docs/public-launch-runbook.md` with deployment smoke and rollback checklist sections.
### Risks / Notes to Revisit ### Risks / Notes to Revisit
- Workspace is intentionally dirty; commits must be path-scoped to avoid mixing unrelated changes. - Workspace is intentionally dirty; commits must be path-scoped to avoid mixing unrelated changes.

View File

@ -0,0 +1,71 @@
# Security Review (Public Launch Baseline)
## Purpose
This document tracks launch-critical security findings for app, data, users, and host exposure, plus current mitigation status.
## Findings and Status
1. Direct home-IP exposure increases scanning and DDoS risk.
- Status: Partially mitigated.
- Mitigations in repo:
- TLS + HTTPS redirect (`docker/nginx/fiddy.conf`)
- request rate limits (`docker/nginx/fiddy.conf`)
- connection cap (`docker/nginx/fiddy.conf`)
- Required ops actions:
- enforce host firewall allowlist rules
- restrict SSH to VPN or fixed allowlist
- consider optional upstream shielding (Cloudflare free tier)
2. API abuse risk (auth and write endpoints).
- Status: Mitigated.
- Mitigations in repo:
- server-side rate limiting (`apps/web/lib/server/rate-limit.ts`)
- auth route limiter integration (`apps/web/app/api/auth/login/route.ts`, `apps/web/app/api/auth/register/route.ts`)
- write-path limiter integration in server services (`apps/web/lib/server/*.ts`)
- proxy rate limits (`docker/nginx/fiddy.conf`)
3. Sensitive log exposure risk.
- Status: Mitigated.
- Mitigations in repo:
- invite/token/password redaction in error logging (`apps/web/lib/server/errors.ts`)
- invite metadata stores last4 only (`apps/web/lib/server/groups.ts`, `apps/web/lib/server/group-invites.ts`)
- removed client debug console output (`apps/web/components/tag-input.tsx`)
4. Request traceability gaps for incident response.
- Status: Mitigated.
- Mitigations in repo:
- API response includes `request_id` + `requestId` compatibility alias
- request-id propagation through proxy (`docker/nginx/includes/fiddy-proxy.conf`)
- request-id response header (`docker/nginx/fiddy.conf`)
- structured JSON access logging (`docker/nginx/fiddy.conf`)
- nginx log ingestion by promtail (`docker/observability/promtail-config.yml`)
5. Session and auth contract risk.
- Status: Mitigated.
- Mitigations in repo:
- DB-backed sessions with HttpOnly cookie use preserved (`apps/web/lib/server/session.ts`, auth routes)
- route/service authorization remains server-side (`apps/web/lib/server/group-access.ts`, service modules)
6. Data leakage risk for receipt bytes.
- Status: Mitigated.
- Mitigations in repo:
- entries list services do not return receipt bytes (`apps/web/lib/server/entries.ts`)
- receipt bytes remain a separate retrieval flow by contract
## Open Operational Tasks (Not Code)
1. Rotate all production secrets before public launch.
2. Run weekly restore drill and track measured RTO/RPO.
3. Enable host-level ban tooling (Fail2ban or CrowdSec) on nginx logs.
4. Create Grafana alerts for:
- elevated 5xx rate
- repeated 401/403 spikes
- DB connectivity failures
- disk usage thresholds
## Verification Checklist
- [x] `npm run lint` passes (warnings acceptable for now).
- [x] `npm test` passes.
- [x] `npm run build` passes.
- [ ] Production host firewall rules verified.
- [ ] SSH restricted to VPN/allowlist.
- [ ] Backup restore drill logged for current week.

View File

@ -31,23 +31,35 @@
## 4) Reverse Proxy + Network Hardening ## 4) Reverse Proxy + Network Hardening
- Use `docker/nginx/fiddy.conf` as baseline. - Use `docker/nginx/fiddy.conf` as baseline.
- Install certificate with Lets Encrypt. - Install certificate with Let's Encrypt.
- Route 443 -> app container only. - Route 443 -> app container only.
- Keep Postgres private; never expose 5432 publicly. - Keep Postgres private; never expose 5432 publicly.
- Restrict SSH to allowlist/VPN. - Restrict SSH to allowlist/VPN.
- Add host firewall rules: - Add host firewall rules:
- Allow inbound `80/443`. - Allow inbound `80/443`.
- Deny all other inbound by default. - Deny all other inbound by default.
- Confirm Nginx writes JSON logs:
- `/var/log/nginx/fiddy-access.log`
- `/var/log/nginx/fiddy-error.log`
## 5) Observability ## 5) Observability
- Bring up monitoring stack: - Bring up monitoring stack:
- `docker compose -f docker/observability/docker-compose.observability.yml up -d` - `docker compose -f docker/observability/docker-compose.observability.yml up -d`
- Configure Grafana datasource to Loki (`http://loki:3100`). - Configure Grafana datasource to Loki (`http://loki:3100`).
- Verify nginx logs are ingested by Promtail (`job="nginx"`).
- Add Uptime Kuma monitors: - Add Uptime Kuma monitors:
- `/api/health/live` - `/api/health/live`
- `/api/health/ready` - `/api/health/ready`
- home page (`/`) - home page (`/`)
## 5.1) Deployment Smoke Check
- Run after every deploy and rollback:
- `scripts/smoke-public-launch.sh https://your-domain`
- The script verifies:
- `/api/health/live` and `/api/health/ready` return `200`
- both responses include `X-Request-Id` header
- both response bodies include `request_id`
## 6) Backup + Restore ## 6) Backup + Restore
- Daily backup command: - Daily backup command:
- `scripts/backup-postgres.sh` - `scripts/backup-postgres.sh`
@ -63,3 +75,10 @@
3. Check `/api/health/ready` status and DB connectivity. 3. Check `/api/health/ready` status and DB connectivity.
4. Roll back to previous known-good Dokploy release if needed. 4. Roll back to previous known-good Dokploy release if needed.
5. Capture root cause and update this runbook/checklist. 5. Capture root cause and update this runbook/checklist.
## 8) Rollback Checklist
1. Select previous healthy image in Dokploy release history.
2. Trigger rollback and wait for deployment completion.
3. Run `scripts/smoke-public-launch.sh https://your-domain`.
4. Verify error-rate drop in Grafana/Loki and confirm no DB migration mismatch.
5. Log the rolled back version, timestamp, and reason.

View File

@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Post-deploy smoke check: verifies the liveness/readiness endpoints return
# 200, carry an X-Request-Id response header, and include a request_id field
# in the response body. Usage: pass the base URL as $1 or via BASE_URL.
set -euo pipefail

BASE_URL="${1:-${BASE_URL:-}}"
if [[ -z "${BASE_URL}" ]]; then
  echo "Usage: BASE_URL=https://your-domain ./scripts/smoke-public-launch.sh"
  echo " or: ./scripts/smoke-public-launch.sh https://your-domain"
  exit 1
fi

LIVE_URL="${BASE_URL%/}/api/health/live"
READY_URL="${BASE_URL%/}/api/health/ready"

tmp_headers_live="$(mktemp)"
tmp_headers_ready="$(mktemp)"
tmp_body_live="$(mktemp)"
tmp_body_ready="$(mktemp)"
trap 'rm -f "$tmp_headers_live" "$tmp_headers_ready" "$tmp_body_live" "$tmp_body_ready"' EXIT

# Extract the last X-Request-Id header value from a dumped-headers file.
# HTTP headers are CRLF-terminated, so strip CRs before trimming — otherwise
# a trailing \r leaks into the value (and into anything that echoes it).
extract_request_id() {
  grep -i '^x-request-id:' "$1" | tail -n 1 | cut -d':' -f2- | tr -d '\r' | xargs || true
}

# curl failures (DNS, TLS, connection refused) abort the script via set -e,
# with -S printing the error to stderr.
status_live="$(curl -sS -D "$tmp_headers_live" -o "$tmp_body_live" -w "%{http_code}" "$LIVE_URL")"
status_ready="$(curl -sS -D "$tmp_headers_ready" -o "$tmp_body_ready" -w "%{http_code}" "$READY_URL")"
echo "LIVE status: $status_live"
echo "READY status: $status_ready"

if [[ "$status_live" != "200" ]]; then
  echo "Live check failed:"
  cat "$tmp_body_live"
  exit 1
fi
if [[ "$status_ready" != "200" ]]; then
  echo "Ready check failed:"
  cat "$tmp_body_ready"
  exit 1
fi

request_id_header_live="$(extract_request_id "$tmp_headers_live")"
request_id_header_ready="$(extract_request_id "$tmp_headers_ready")"
if [[ -z "$request_id_header_live" || -z "$request_id_header_ready" ]]; then
  echo "Missing X-Request-Id header in one or more responses."
  exit 1
fi

if ! grep -q '"request_id"' "$tmp_body_live"; then
  echo "Live response missing request_id field."
  exit 1
fi
if ! grep -q '"request_id"' "$tmp_body_ready"; then
  echo "Ready response missing request_id field."
  exit 1
fi

echo "Smoke checks passed."
echo "Live X-Request-Id: $request_id_header_live"
echo "Ready X-Request-Id: $request_id_header_ready"