diff --git a/load-testing/Dockerfile b/load-testing/Dockerfile new file mode 100644 index 00000000..e1b30e53 --- /dev/null +++ b/load-testing/Dockerfile @@ -0,0 +1,79 @@ +# ============================================================================= +# Skyflow Java SDK v3 — Load Test SUT Image +# +# Builds and runs both: +# - EchoServer (mock Skyflow vault) on $ECHO_PORT (default 3015) +# - WrapperServer (SDK under test) on $WRAPPER_PORT (default 8080) +# +# Build context: repo root +# docker build -f load-testing/Dockerfile -t skyflow-sut . +# +# Run: +# docker run -p 8080:8080 -p 3015:3015 skyflow-sut +# ============================================================================= + +# --------------------------------------------------------------------------- +# Stage 1: Build — install common + v3 SDK, build wrapper fat jar +# --------------------------------------------------------------------------- +FROM maven:3.9-eclipse-temurin-17 AS builder + +WORKDIR /build + +# Cache common module dependencies +COPY common/pom.xml common/pom.xml +RUN mvn dependency:go-offline -f common/pom.xml -q || true + +# Build common module +COPY common/ common/ +RUN mvn install -f common/pom.xml -DskipTests -Dgpg.skip=true -q + +# Cache v3 SDK dependencies +COPY v3/pom.xml v3/pom.xml +RUN mvn dependency:go-offline -f v3/pom.xml -q || true + +# Build v3 SDK +COPY v3/ v3/ +RUN mvn install -f v3/pom.xml -DskipTests -Dgpg.skip=true -q + +# Cache wrapper dependencies +COPY load-testing/wrapper/pom.xml load-testing/wrapper/pom.xml +RUN mvn dependency:go-offline -f load-testing/wrapper/pom.xml -q || true + +# Build wrapper fat jar +COPY load-testing/wrapper/ load-testing/wrapper/ +RUN mvn package -f load-testing/wrapper/pom.xml -DskipTests -Dgpg.skip=true -q + +# --------------------------------------------------------------------------- +# Stage 2: Compile EchoServer (no Maven needed — single file) +# --------------------------------------------------------------------------- +FROM 
eclipse-temurin:17-jdk AS echo-builder + +WORKDIR /echo +COPY load-testing/echo-server/EchoServer.java . +RUN javac EchoServer.java + +# --------------------------------------------------------------------------- +# Stage 3: Runtime image +# --------------------------------------------------------------------------- +FROM eclipse-temurin:17-jre + +WORKDIR /app + +# Copy artifacts +COPY --from=builder /build/load-testing/wrapper/target/skyflow-load-test-wrapper-1.0.0.jar wrapper.jar +COPY --from=echo-builder /echo/EchoServer.class . + +# Startup script — runs EchoServer then WrapperServer +COPY load-testing/docker-entrypoint.sh entrypoint.sh +RUN chmod +x entrypoint.sh + +# Expose ports +EXPOSE 8080 3015 + +ENV ECHO_PORT=3015 +ENV WRAPPER_PORT=8080 +ENV ECHO_WAIT_MS=0 +ENV ECHO_ERR_PCT=0 +ENV VAULT_ID=mock-vault-id + +ENTRYPOINT ["./entrypoint.sh"] diff --git a/load-testing/README.md b/load-testing/README.md new file mode 100644 index 00000000..d0bcdb8e --- /dev/null +++ b/load-testing/README.md @@ -0,0 +1,414 @@ +# Skyflow Java SDK v3 — Load Testing + +Measures SDK throughput, latency, and error-handling under concurrent load using [k6](https://k6.io/), without hitting a real Skyflow vault. + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ k6 (Load Generator) │ +│ │ +│ VU 1 ──┐ │ +│ VU 2 ──┤ │ +│ VU 3 ──┼──── POST /insert ─────────────────────────► │ +│ ... 
├──── POST /detokenize ─────────────────────────► │ +│ VU N ──┘ │ +└─────────────────────────┬───────────────────────────────────────┘ + │ concurrent HTTP (default: 50 VUs) + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ WrapperServer (Java) :8080 │ +│ │ +│ Thread Pool (200 threads) │ +│ ┌──────────────────────────────────────────────────────────┐ │ +│ │ InsertHandler → skyflowClient.vault().bulkInsert() │ │ +│ │ DetokenizeHandler → skyflowClient.vault().bulkDetokenize│ │ +│ │ HealthHandler → GET /health │ │ +│ │ MetricsHandler → GET /metrics (JVM + counters) │ │ +│ └──────────────────────────────────────────────────────────┘ │ +│ │ │ +│ Skyflow Java SDK v3 │ +└────────────────────┬────────────────────────────────────────────┘ + │ HTTP (SDK internal calls) + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ EchoServer (Java) :3015 │ +│ (fake Skyflow vault) │ +│ │ +│ POST /v2/records/insert → echoes back mock response │ +│ POST /v2/tokens/detokenize → echoes back mock response │ +│ GET /health → liveness check │ +│ │ +│ Configurable: ECHO_WAIT_MS (latency) ECHO_ERR_PCT (errors) │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### Request Flow + +``` +k6 VU + │ + │ POST /insert { table, num_records } + ▼ +WrapperServer + │ builds InsertRequest + │ calls skyflowClient.vault().bulkInsert() + ▼ +Skyflow SDK (real SDK code under test) + │ HTTP POST /v2/records/insert + ▼ +EchoServer (mock vault — no real Skyflow needed) + │ waits ECHO_WAIT_MS ms + │ randomly fails ECHO_ERR_PCT % of requests + │ returns mock JSON response + ▼ +Skyflow SDK (parses response, builds InsertResponse) + ▼ +WrapperServer (serializes response, increments counters) + ▼ +k6 VU (checks status, records metrics) +``` + +### Startup Sequence (`run.sh`) + +``` +run.sh + │ + ├─ 1. mvn install (v3 SDK → local Maven repo) + ├─ 2. mvn package (build WrapperServer fat JAR) + ├─ 3. javac (compile EchoServer) + ├─ 4. 
java EchoServer :3015 ──► health check + ├─ 5. java WrapperServer :8080 ──► health check + └─ 6. k6 run insert.js / detokenize.js / both + │ + └─ on exit: kill EchoServer + WrapperServer +``` + +- **k6** — drives concurrent load (VUs) +- **WrapperServer** — Java HTTP server wrapping the Skyflow Java v3 SDK; handles up to 200 threads in parallel +- **EchoServer** — mock Skyflow vault; no real credentials needed + +--- + +## Prerequisites + +- Java 11+ +- Maven 3.x +- [k6](https://k6.io/docs/get-started/installation/) installed (`brew install k6` on macOS) + +--- + +## Quick Start + +```bash +# From the repo root +./load-testing/run.sh all +``` + +This will: +1. Install the v3 SDK to your local Maven repo +2. Build the WrapperServer fat JAR +3. Compile and start the EchoServer +4. Start the WrapperServer +5. Run `insert.js` then `detokenize.js` +6. Print metrics and shut down both servers + +--- + +## Usage + +```bash +./load-testing/run.sh [insert|detokenize|all] [extra k6 flags] +``` + +| Command | Description | +|---|---| +| `./load-testing/run.sh insert` | Run insert load test only | +| `./load-testing/run.sh detokenize` | Run detokenize load test only | +| `./load-testing/run.sh all` | Run both tests sequentially | + +--- + +## Environment Variables + +### Infrastructure (`run.sh`) + +These control the EchoServer and WrapperServer. Set them as shell environment variables before running `run.sh`. + +| Variable | Default | Description | +|---|---|---| +| `ECHO_PORT` | `3015` | Port the EchoServer (fake vault) listens on | +| `WRAPPER_PORT` | `8080` | Port the WrapperServer listens on | +| `ECHO_WAIT_MS` | `0` | Artificial latency added to every EchoServer response (ms). Use to simulate a slow vault. | +| `ECHO_ERR_PCT` | `0` | Percentage of requests the EchoServer randomly fails (0–100). Use to test SDK error handling. | +| `VAULT_ID` | `mock-vault-id` | Vault ID passed to the SDK. | + +### `insert.js` (k6) + +These control the insert load test. 
Pass them after the test name using `--env KEY=VALUE`. + +| Variable | Default | Description | +|---|---|---| +| `WRAPPER_URL` | `http://localhost:8080` | WrapperServer base URL | +| `VUS` | `50` | Number of concurrent virtual users (concurrency level) | +| `DURATION` | `120` | Total test duration in seconds. Includes a fixed 30s ramp-up and 30s ramp-down, so minimum useful value is `61`. | +| `NUM_RECORDS` | `1` | Number of records per `bulkInsert()` SDK call. Increase to test batching. | +| `TABLE` | `load_test_table` | Vault table name sent in each insert request. | + +### `detokenize.js` (k6) + +These control the detokenize load test. Pass them after the test name using `--env KEY=VALUE`. + +| Variable | Default | Description | +|---|---|---| +| `WRAPPER_URL` | `http://localhost:8080` | WrapperServer base URL | +| `VUS` | `50` | Number of concurrent virtual users | +| `DURATION` | `120` | Total test duration in seconds (min: 61) | +| `NUM_TOKENS` | `1` | Number of tokens per `bulkDetokenize()` SDK call. Increase to test batching. | +| `TOKEN` | `mock-token-0000-0000-0000-000000000001` | Base token string. An index suffix is appended for each token in a batch (e.g. `mock-token-...-0`, `mock-token-...-1`). 
| + +--- + +## Examples + +**Baseline run with defaults:** +```bash +./load-testing/run.sh all +``` + +**High concurrency insert test (200 VUs, 3 min):** +```bash +VUS=200 DURATION=180 ./load-testing/run.sh insert +``` + +**Simulate slow vault (100ms latency):** +```bash +ECHO_WAIT_MS=100 ./load-testing/run.sh all +``` + +**Simulate 5% vault error rate:** +```bash +ECHO_ERR_PCT=5 ./load-testing/run.sh detokenize +``` + +**Batch insert — 10 records per SDK call:** +```bash +./load-testing/run.sh insert --env NUM_RECORDS=10 +``` + +**Batch detokenize — 5 tokens per SDK call, 100 VUs:** +```bash +./load-testing/run.sh detokenize --env NUM_TOKENS=5 --env VUS=100 +``` + +**Combined stress test with latency and errors:** +```bash +ECHO_WAIT_MS=50 ECHO_ERR_PCT=2 VUS=150 DURATION=300 ./load-testing/run.sh all +``` + +--- + +## Thresholds + +Both tests fail if any of these are breached: + +| Metric | Threshold | +|---|---| +| `http_req_duration` p95 | < 500ms | +| `http_req_failed` rate | < 1% | +| `insert_errors` / `detokenize_errors` rate | < 1% | +| `insert_sdk_duration_ms` / `detokenize_sdk_duration_ms` p95 | < 400ms | + +--- + +## Metrics Endpoint + +While a test is running, query the WrapperServer for live JVM and SDK stats: + +```bash +curl http://localhost:8080/metrics +``` + +**Response:** +```json +{ + "sdk_calls": { "total": 1200, "success": 1195, "error": 5 }, + "jvm": { + "heap_used_mb": 112, + "heap_total_mb": 256, + "heap_max_mb": 512, + "active_threads": 87, + "gc_count": 14, + "gc_time_ms": 320 + } +} +``` + +--- + +## Port Conflicts + +If you see `Address already in use`, kill the stale processes: + +```bash +lsof -ti :3015 | xargs kill -9 +lsof -ti :8080 | xargs kill -9 +``` + +--- + +## SDK Performance Testing + +A dedicated performance testing layer on top of the existing setup, matching the SDK perf testing spec. + +### Goals + +| Objective | What is measured | +|---|---| +| Scalability | RPS vs. 
RAM / CPU consumption | +| Concurrency | Thread count growth under load | +| Stability | Memory leaks / death spirals under sustained high load | + +### Workload Profile (Linear Step-Up) + +Each step runs for `STEP_DURATION` seconds (default: 5 min) to let the JVM's heap settle. + +``` +RPS +1000 │ ┌─────────────┐ + │ / \ + 500 │ ┌────────┘ └─────────┐ + │ / \ + 100 │ ┌────────┘ └── + │ / + 0 │──┘ + └──────────────────────────────────────────────────────── time + Baseline Light Medium High Ramp-down + (1m) (5m) (5m) (5m) (5m) +``` + +### Resource Phases (Docker) + +| Phase | CPU | RAM | Purpose | +|---|---|---|---| +| Phase 1 | 1 Core | 512 MB | Baseline — typical constrained environment | +| Phase 2 | 2 Cores | 512 MB | CPU scaling — same memory | +| Phase 3 | 4 Cores | 1 GB | Unconstrained — max throughput | + +### Metrics Captured + +**External (OS level, via `docker stats`)** + +| Metric | Unit | Purpose | +|---|---|---| +| RAM (total physical) | MB | Primary scaling metric; detects leaks | +| CPU utilization | % per core | Identifies CPU-bound behaviour | + +**Internal (JVM level, via `/metrics` polling every 5s)** + +| Metric | Unit | Purpose | +|---|---|---| +| Heap used / total / max | MB | Detects managed memory leaks | +| Active threads | Count | Detects thread pool saturation | +| GC count & pause time | Count / ms | Measures GC pressure | +| SDK call counters | Count | Success vs. error ratio | + +**k6 output** + +| Metric | Unit | Purpose | +|---|---|---| +| `http_req_duration` p50/p95/p99 | ms | End-to-end latency distribution | +| `sdk_latency_ms` p95/p99 | ms | SDK-only latency | +| `http_req_failed` rate | % | HTTP error rate | +| `sdk_error_rate` | % | SDK-level error rate | +| `sdk_requests_total` | Count | Total requests processed | + +### Running the Performance Test + +**Prerequisites:** Docker, k6, Java, Maven. 
+ +**Option A — Full three-phase automated run:** + +```bash +# From repo root — runs all 3 resource phases automatically +./load-testing/perf-run.sh insert 1000 + +# Detokenize at 500 RPS peak +./load-testing/perf-run.sh detokenize 500 +``` + +Results are saved to `load-testing/results//`: +``` +phase1_1cpu_512mb_docker_stats.csv +phase1_1cpu_512mb_jvm_metrics.jsonl +phase1_1cpu_512mb_k6_output.json +phase1_1cpu_512mb_k6_summary.json +phase2_... +phase3_... +``` + +**Option B — Single phase with Docker Compose:** + +```bash +cd + +# Phase 1: 1 CPU / 512 MB +CPU_LIMIT=1 MEM_LIMIT=512m OP=insert MAX_RPS=1000 \ + docker compose -f load-testing/docker-compose.perf.yml up --build + +# Phase 2: 2 CPUs / 512 MB +CPU_LIMIT=2 MEM_LIMIT=512m OP=insert MAX_RPS=1000 \ + docker compose -f load-testing/docker-compose.perf.yml up + +# Phase 3: 4 CPUs / 1 GB +CPU_LIMIT=4 MEM_LIMIT=1g OP=insert MAX_RPS=1000 \ + docker compose -f load-testing/docker-compose.perf.yml up +``` + +**Option C — Local (no Docker), custom RPS:** + +```bash +# Shorter step duration (60s) for quick validation +STEP_DURATION=60 ./load-testing/run.sh insert --env MAX_RPS=500 +``` + +### Perf Test Env Vars + +| Variable | Default | Description | +|---|---|---| +| `OP` | `insert` | Operation under test: `insert` or `detokenize` | +| `MAX_RPS` | `1000` | Peak RPS (stress target). Light=10%, Medium=50%, High=100% of this. 
| +| `STEP_DURATION` | `300` | Seconds spent at each RPS level | +| `CPU_LIMIT` | `1` | Docker CPU cores for SUT container | +| `MEM_LIMIT` | `512m` | Docker memory limit for SUT container | +| `NUM_RECORDS` | `1` | Records per `bulkInsert()` call | +| `NUM_TOKENS` | `1` | Tokens per `bulkDetokenize()` call | + +### Thresholds (perf.js) + +| Metric | p95 | p99 | +|---|---|---| +| `http_req_duration` | < 500ms | < 1000ms | +| `sdk_latency_ms` | < 400ms | < 800ms | +| `http_req_failed` | < 5% | — | +| `sdk_error_rate` | < 5% | — | + +### File Structure + +``` +load-testing/ +├── perf-run.sh # three-phase orchestrator (local + docker) +├── docker-compose.perf.yml # two-tier Docker setup (SUT + k6) +├── Dockerfile # SUT image (WrapperServer + EchoServer) +├── docker-entrypoint.sh # container startup script +├── k6/ +│ ├── perf.js # RPS-based step-up perf script ← NEW +│ ├── insert.js # time-based insert test +│ └── detokenize.js # time-based detokenize test +└── results/ # output directory (auto-created) + └── YYYY-MM-DD_HH-MM-SS/ + ├── phase1_*_docker_stats.csv + ├── phase1_*_jvm_metrics.jsonl + ├── phase1_*_k6_output.json + └── phase1_*_k6_summary.json +``` \ No newline at end of file diff --git a/load-testing/docker-compose.perf.yml b/load-testing/docker-compose.perf.yml new file mode 100644 index 00000000..e6d15436 --- /dev/null +++ b/load-testing/docker-compose.perf.yml @@ -0,0 +1,91 @@ +# ============================================================================= +# Skyflow SDK v3 — Two-Tier Performance Testing +# +# Tier 1 (sut): WrapperServer + EchoServer (SDK under test) +# Tier 2 (k6): k6 load generator +# +# Usage: +# # Phase 1 — 1 CPU / 512 MB +# docker compose -f load-testing/docker-compose.perf.yml up --build +# +# # Override resource limits for Phase 2 / 3 +# CPU_LIMIT=2 MEM_LIMIT=512m docker compose -f load-testing/docker-compose.perf.yml up +# +# Env overrides: +# CPU_LIMIT default: 1 (cores) +# MEM_LIMIT default: 512m +# MAX_RPS default: 1000 +# OP 
default: insert (insert | detokenize) +# ECHO_WAIT_MS default: 0 +# ECHO_ERR_PCT default: 0 +# ============================================================================= + +version: '3.8' + +x-sut-resources: &sut-resources + cpus: '${CPU_LIMIT:-1}' + memory: '${MEM_LIMIT:-512m}' + +services: + + # --------------------------------------------------------------------------- + # Tier 1: System Under Test — SDK + mock vault + # --------------------------------------------------------------------------- + sut: + build: + context: .. # repo root (needs common/, v3/, load-testing/) + dockerfile: load-testing/Dockerfile + container_name: skyflow-sut + ports: + - "8080:8080" # WrapperServer (SDK) + - "3015:3015" # EchoServer (mock vault) — optional, for debugging + environment: + ECHO_PORT: 3015 + WRAPPER_PORT: 8080 + ECHO_WAIT_MS: "${ECHO_WAIT_MS:-0}" + ECHO_ERR_PCT: "${ECHO_ERR_PCT:-0}" + VAULT_ID: mock-vault-id + deploy: + resources: + limits: + <<: *sut-resources + healthcheck: + test: ["CMD", "curl", "-sf", "http://localhost:8080/health"] + interval: 5s + timeout: 3s + retries: 10 + start_period: 30s + networks: + - perf-net + + # --------------------------------------------------------------------------- + # Tier 2: Load Generator — k6 + # Runs after SUT is healthy; exits when test completes. 
+ # --------------------------------------------------------------------------- + k6: + image: grafana/k6:latest + container_name: skyflow-k6 + depends_on: + sut: + condition: service_healthy + volumes: + - ./k6:/scripts # mount k6 scripts + - ./results:/results # output directory + environment: + WRAPPER_URL: "http://sut:8080" + OP: "${OP:-insert}" + MAX_RPS: "${MAX_RPS:-1000}" + STEP_DURATION: "${STEP_DURATION:-300}" + NUM_RECORDS: "${NUM_RECORDS:-1}" + NUM_TOKENS: "${NUM_TOKENS:-1}" + command: > + run + --out json=/results/k6-output.json + --summary-export /results/k6-summary.json + /scripts/perf.js + networks: + - perf-net + +networks: + perf-net: + driver: bridge diff --git a/load-testing/docker-entrypoint.sh b/load-testing/docker-entrypoint.sh new file mode 100755 index 00000000..23c74732 --- /dev/null +++ b/load-testing/docker-entrypoint.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# Starts EchoServer and WrapperServer inside the SUT container. +set -euo pipefail + +ECHO_PORT="${ECHO_PORT:-3015}" +WRAPPER_PORT="${WRAPPER_PORT:-8080}" +ECHO_WAIT_MS="${ECHO_WAIT_MS:-0}" +ECHO_ERR_PCT="${ECHO_ERR_PCT:-0}" +VAULT_ID="${VAULT_ID:-mock-vault-id}" + +echo "[entrypoint] Starting EchoServer on :$ECHO_PORT (wait=${ECHO_WAIT_MS}ms, err=${ECHO_ERR_PCT}%)" +java -cp /app EchoServer "$ECHO_PORT" "$ECHO_WAIT_MS" "$ECHO_ERR_PCT" & +ECHO_PID=$! + +# Wait for EchoServer to be ready +for i in $(seq 1 10); do + curl -sf "http://localhost:$ECHO_PORT/health" > /dev/null 2>&1 && break + sleep 1 +done + +echo "[entrypoint] Starting WrapperServer on :$WRAPPER_PORT" +VAULT_ID="$VAULT_ID" \ +VAULT_URL="http://localhost:$ECHO_PORT" \ +WRAPPER_PORT="$WRAPPER_PORT" \ +java -jar /app/wrapper.jar & +WRAPPER_PID=$! + +# Wait for WrapperServer to be ready +for i in $(seq 1 15); do + curl -sf "http://localhost:$WRAPPER_PORT/health" > /dev/null 2>&1 && break + sleep 1 +done + +echo "[entrypoint] Both servers running. 
EchoServer PID=$ECHO_PID, WrapperServer PID=$WRAPPER_PID" + +# Keep container alive; forward signals +trap "kill $ECHO_PID $WRAPPER_PID 2>/dev/null" EXIT INT TERM +wait $WRAPPER_PID diff --git a/load-testing/echo-server/EchoServer$DetokenizeHandler.class b/load-testing/echo-server/EchoServer$DetokenizeHandler.class new file mode 100644 index 00000000..9dc8fdfd Binary files /dev/null and b/load-testing/echo-server/EchoServer$DetokenizeHandler.class differ diff --git a/load-testing/echo-server/EchoServer$HealthHandler.class b/load-testing/echo-server/EchoServer$HealthHandler.class new file mode 100644 index 00000000..8ed57380 Binary files /dev/null and b/load-testing/echo-server/EchoServer$HealthHandler.class differ diff --git a/load-testing/echo-server/EchoServer$InsertHandler.class b/load-testing/echo-server/EchoServer$InsertHandler.class new file mode 100644 index 00000000..f3b6b479 Binary files /dev/null and b/load-testing/echo-server/EchoServer$InsertHandler.class differ diff --git a/load-testing/echo-server/EchoServer$MetricsHandler.class b/load-testing/echo-server/EchoServer$MetricsHandler.class new file mode 100644 index 00000000..0e23a2ae Binary files /dev/null and b/load-testing/echo-server/EchoServer$MetricsHandler.class differ diff --git a/load-testing/echo-server/EchoServer.class b/load-testing/echo-server/EchoServer.class new file mode 100644 index 00000000..3cd6293b Binary files /dev/null and b/load-testing/echo-server/EchoServer.class differ diff --git a/load-testing/echo-server/EchoServer.java b/load-testing/echo-server/EchoServer.java new file mode 100644 index 00000000..e70fac62 --- /dev/null +++ b/load-testing/echo-server/EchoServer.java @@ -0,0 +1,277 @@ +/** + * Skyflow SDK Load Testing - Echo/Mock Server (v3 SDK) + * + * Simulates only the Vault v3 API endpoints that VaultController actually calls: + * POST /v2/records/insert <- bulkInsert() / bulkInsertAsync() + * POST /v2/tokens/detokenize <- bulkDetokenize() / bulkDetokenizeAsync() + * + * 
Usage: + * javac EchoServer.java + * java EchoServer [port] [wait_time_ms] [error_rate_percent] + * + * Examples: + * java EchoServer 3015 # defaults: port=3015, wait=0ms, error=0% + * java EchoServer 3015 50 # 50 ms simulated latency per request + * java EchoServer 3015 50 10 # 50 ms latency + 10 % random 5xx + * + * wait_time_ms / expected_response_code can also be passed per-request: + * - as JSON fields in the request body (e.g. {"wait_time_ms":50,...}) + * - or as query params (?wait_time_ms=50&expected_response_code=500) + */ + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class EchoServer { + + static int defaultWaitMs = 0; + static int errorRatePct = 0; + static final Random rng = new Random(); + + static final AtomicLong totalRequests = new AtomicLong(); + static final AtomicLong totalErrors = new AtomicLong(); + + public static void main(String[] args) throws IOException { + int port = 3015; + if (args.length >= 1) port = Integer.parseInt(args[0]); + if (args.length >= 2) defaultWaitMs = Integer.parseInt(args[1]); + if (args.length >= 3) errorRatePct = Integer.parseInt(args[2]); + + HttpServer server = HttpServer.create(new InetSocketAddress(port), 0); + server.createContext("/v2/records/insert", new InsertHandler()); + server.createContext("/v2/tokens/detokenize", new DetokenizeHandler()); + server.createContext("/metrics", new MetricsHandler()); + server.createContext("/health", new HealthHandler()); +// int echoThreads = defaultWaitMs > 0 ? 
500 : 100; + server.setExecutor(Executors.newFixedThreadPool(10)); + server.start(); + + System.out.printf("[EchoServer-v3] port=%d wait=%dms error_rate=%d%%%n", + port, defaultWaitMs, errorRatePct); + } + + // ========================================================================= + // POST /v2/records/insert + // Called by VaultController.bulkInsert() / bulkInsertAsync() + // + // SDK request body: + // {"vaultId":"...","tableName":"...","records":[{"data":{"col":"val"},...}],"upsert":{...}} + // + // Expected response (RecordResponseObject per record): + // {"records":[{"skyflowID":"uuid","tokens":{"col":"tok"},"tableName":"tbl","httpCode":200}]} + // ========================================================================= + static class InsertHandler implements HttpHandler { + @Override + public void handle(HttpExchange ex) throws IOException { + totalRequests.incrementAndGet(); + try { + String body = readBody(ex.getRequestBody()); + String query = ex.getRequestURI().getQuery(); + + simulateLatency(body, query); + + int code = resolveExpectedCode(body, query); + if (code != 200) { + totalErrors.incrementAndGet(); + sendJson(ex, code, errorBody(code)); + return; + } + + int count = countPattern(body, "\"data\""); + if (count == 0) count = 1; + String table = orDefault(extractString(body, "tableName"), "load_test_table"); + + StringBuilder sb = new StringBuilder("{\"records\":["); + for (int i = 0; i < count; i++) { + if (i > 0) sb.append(","); + String id = uuid(); + sb.append("{\"skyflowID\":\"").append(id).append("\",") + .append("\"tokens\":{\"mock_field\":\"tok-").append(id, 0, 8).append("\"},") + .append("\"tableName\":\"").append(table).append("\",") + .append("\"httpCode\":200}"); + } + sb.append("]}"); + // add logs here + System.out.println("Insert call received with record count: " + count); + sendJson(ex, 200, sb.toString()); + + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + sendJson(ex, 500, errorBody(500)); + } catch 
(IOException e) { + throw e; + } + } + } + + // ========================================================================= + // POST /v2/tokens/detokenize + // Called by VaultController.bulkDetokenize() / bulkDetokenizeAsync() + // + // SDK request body: + // {"vaultId":"...","tokens":["tok1","tok2"],"tokenGroupRedactions":[...]} + // + // Expected response (DetokenizeResponseObject per token): + // {"response":[{"token":"tok1","value":"plain-val","httpCode":200},...]} + // ========================================================================= + static class DetokenizeHandler implements HttpHandler { + @Override + public void handle(HttpExchange ex) throws IOException { + totalRequests.incrementAndGet(); + try { + String body = readBody(ex.getRequestBody()); + String query = ex.getRequestURI().getQuery(); + + simulateLatency(body, query); + + int code = resolveExpectedCode(body, query); + if (code != 200) { + totalErrors.incrementAndGet(); + sendJson(ex, code, errorBody(code)); + return; + } + + // Parse tokens array: "tokens":["tok1","tok2"] + Pattern p = Pattern.compile("\"tokens\"\\s*:\\s*\\[([^\\]]+)\\]"); + Matcher m = p.matcher(body); + StringBuilder sb = new StringBuilder("{\"response\":["); + boolean first = true; + + if (m.find()) { + Matcher tm = Pattern.compile("\"([^\"]+)\"").matcher(m.group(1)); + while (tm.find()) { + String tok = tm.group(1); + if (!first) sb.append(","); + sb.append("{\"token\":\"").append(tok) + .append("\",\"value\":\"plain-").append(tok, 0, Math.min(6, tok.length())) + .append("\",\"httpCode\":200}"); + first = false; + } + } + if (first) { + sb.append("{\"token\":\"mock-token\",\"value\":\"mock-plain-value\",\"httpCode\":200}"); + } + sb.append("]}"); + sendJson(ex, 200, sb.toString()); + + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + sendJson(ex, 500, errorBody(500)); + } catch (IOException e) { + throw e; + } + } + } + + // 
========================================================================= + // /metrics — request counters + JVM stats + // ========================================================================= + static class MetricsHandler implements HttpHandler { + @Override + public void handle(HttpExchange ex) throws IOException { + Runtime rt = Runtime.getRuntime(); + long usedMb = (rt.totalMemory() - rt.freeMemory()) / (1024 * 1024); + long maxMb = rt.maxMemory() / (1024 * 1024); + sendJson(ex, 200, String.format( + "{\"total_requests\":%d,\"total_errors\":%d," + + "\"heap_used_mb\":%d,\"heap_max_mb\":%d,\"active_threads\":%d}", + totalRequests.get(), totalErrors.get(), + usedMb, maxMb, Thread.activeCount())); + } + } + + static class HealthHandler implements HttpHandler { + @Override + public void handle(HttpExchange ex) throws IOException { + sendJson(ex, 200, "{\"status\":\"ok\",\"api\":\"v3\"}"); + } + } + + // ========================================================================= + // Shared helpers + // ========================================================================= + + static void simulateLatency(String body, String query) throws InterruptedException { + int ms = defaultWaitMs; + String qv = extractQueryParam(query, "wait_time_ms"); + String bv = extractLong(body, "wait_time_ms"); + if (qv != null) ms = Integer.parseInt(qv); + else if (bv != null) ms = Integer.parseInt(bv); + if (ms > 0) Thread.sleep(ms); + } + + static int resolveExpectedCode(String body, String query) { + if (errorRatePct > 0 && rng.nextInt(100) < errorRatePct) return 500; + String qv = extractQueryParam(query, "expected_response_code"); + String bv = extractLong(body, "expected_response_code"); + if (qv != null) return Integer.parseInt(qv); + if (bv != null) return Integer.parseInt(bv); + return 200; + } + + static String errorBody(int code) { + return "{\"error\":{\"http_code\":" + code + ",\"message\":\"Simulated server error\"}}"; + } + + static void sendJson(HttpExchange ex, int 
code, String body) throws IOException { + byte[] bytes = body.getBytes(StandardCharsets.UTF_8); + ex.getResponseHeaders().set("Content-Type", "application/json"); + ex.sendResponseHeaders(code, bytes.length); + try (OutputStream os = ex.getResponseBody()) { os.write(bytes); } + } + + static String readBody(InputStream is) throws IOException { + ByteArrayOutputStream buf = new ByteArrayOutputStream(); + byte[] tmp = new byte[4096]; int n; + while ((n = is.read(tmp)) != -1) buf.write(tmp, 0, n); + return buf.toString("UTF-8"); + } + + static String extractQueryParam(String query, String key) { + if (query == null || query.isEmpty()) return null; + for (String pair : query.split("&")) { + String[] kv = pair.split("=", 2); + if (kv.length == 2 && kv[0].equals(key)) return kv[1]; + } + return null; + } + + static String extractLong(String json, String key) { + if (json == null || json.isEmpty()) return null; + Matcher m = Pattern.compile("\"" + key + "\"\\s*:\\s*(\\d+)").matcher(json); + return m.find() ? m.group(1) : null; + } + + static String extractString(String json, String key) { + if (json == null || json.isEmpty()) return null; + Matcher m = Pattern.compile("\"" + key + "\"\\s*:\\s*\"([^\"]+)\"").matcher(json); + return m.find() ? m.group(1) : null; + } + + static int countPattern(String text, String literal) { + if (text == null) return 0; + int count = 0, idx = 0; + while ((idx = text.indexOf(literal, idx)) != -1) { count++; idx += literal.length(); } + return count; + } + + static String uuid() { return UUID.randomUUID().toString(); } + + static String orDefault(String value, String defaultValue) { + return value != null ? 
value : defaultValue; + } +} diff --git a/load-testing/k6/detokenize.js b/load-testing/k6/detokenize.js new file mode 100644 index 00000000..546854df --- /dev/null +++ b/load-testing/k6/detokenize.js @@ -0,0 +1,69 @@ +/** + * K6 Load Test: skyflow.vault().bulkDetokenize() [v3 SDK] + * + * Flow: K6 → WrapperServer /detokenize → SDK bulkDetokenize() → EchoServer POST /v2/tokens/detokenize + * + * Run: + * k6 run load-testing/k6/detokenize.js + * k6 run --env VUS=100 --env NUM_TOKENS=5 load-testing/k6/detokenize.js + * + * Env vars: + * WRAPPER_URL default: http://localhost:8080 + * VUS virtual users default: 50 + * DURATION total seconds default: 120 + * NUM_TOKENS tokens per SDK call default: 1 + * TOKEN base token string default: mock-token-0000-0000-0000-000000000001 + */ + +import http from 'k6/http'; +import { check } from 'k6'; +import { Rate, Trend } from 'k6/metrics'; + +const BASE_URL = __ENV.WRAPPER_URL || 'http://localhost:8080'; +const VUS = parseInt(__ENV.VUS || '50'); +const DURATION = parseInt(__ENV.DURATION || '120'); +const NUM_TOKENS = parseInt(__ENV.NUM_TOKENS || '1'); +const TOKEN = __ENV.TOKEN || 'mock-token-0000-0000-0000-000000000001'; + +const errorRate = new Rate('detokenize_errors'); +const sdkDuration = new Trend('detokenize_sdk_duration_ms', true); + +export const options = { + stages: [ + { duration: '30s', target: VUS }, + { duration: `${DURATION - 60}s`, target: VUS }, + { duration: '30s', target: 0 }, + ], + thresholds: { + 'http_req_duration': ['p(95)<500'], + 'http_req_failed': ['rate<0.01'], + 'detokenize_errors': ['rate<0.01'], + 'detokenize_sdk_duration_ms': ['p(95)<400'], + }, +}; + +export default function () { + const res = http.post( + `${BASE_URL}/detokenize`, + JSON.stringify({ token: TOKEN, num_tokens: NUM_TOKENS }), + { headers: { 'Content-Type': 'application/json' }, tags: { op: 'detokenize' } } + ); + + const ok = check(res, { + 'detokenize: status 200': (r) => r.status === 200, + 'detokenize: has success/errors': (r) => 
{ + try { + const b = JSON.parse(r.body); + return b.success !== undefined || b.errors !== undefined || r.status === 200; + } catch (_) { return false; } + }, + }); + + errorRate.add(!ok); + sdkDuration.add(res.timings.duration); +} + +export function teardown() { + const res = http.get(`${BASE_URL}/metrics`); + if (res.status === 200) console.log('[WrapperMetrics]', res.body); +} diff --git a/load-testing/k6/insert-fixed.js b/load-testing/k6/insert-fixed.js new file mode 100644 index 00000000..223f25a7 --- /dev/null +++ b/load-testing/k6/insert-fixed.js @@ -0,0 +1,132 @@ +/** + * K6 Fixed-Iteration Load Test: skyflow.vault().bulkInsert() [v3 SDK] + * + * Runs exactly TOTAL_REQUESTS iterations across all VUs, then stops. + * RPS = total_requests / total_time_taken (no ramp-up/ramp-down noise) + * + * Run: + * k6 run load-testing/k6/insert-fixed.js + * k6 run --env TOTAL_REQUESTS=100000 --env VUS=200 load-testing/k6/insert-fixed.js + * + * Env vars: + * WRAPPER_URL default: http://localhost:8080 + * VUS virtual users default: 100 + * TOTAL_REQUESTS total SDK calls default: 100000 + * NUM_RECORDS records per call default: 1 + * TABLE vault table default: load_test_table + */ + +import http from 'k6/http'; +import { check } from 'k6'; +import { Rate, Trend } from 'k6/metrics'; + +const BASE_URL = __ENV.WRAPPER_URL || 'http://localhost:8080'; +const VUS = parseInt(__ENV.VUS || '100'); +const TOTAL_REQUESTS = parseInt(__ENV.TOTAL_REQUESTS || '100000'); +const NUM_RECORDS = parseInt(__ENV.NUM_RECORDS || '1'); +const TABLE = __ENV.TABLE || 'load_test_table'; + +const errorRate = new Rate('insert_errors'); +const sdkDuration = new Trend('insert_sdk_duration_ms', true); + +export const options = { + vus: VUS, + iterations: TOTAL_REQUESTS, // k6 stops exactly after this many calls + summaryTrendStats: ['avg', 'min', 'med', 'max', 'p(50)', 'p(90)', 'p(95)', 'p(99)'], +}; + +export default function () { + const res = http.post( + `${BASE_URL}/insert`, + JSON.stringify({ table: 
TABLE, num_records: NUM_RECORDS }), + { headers: { 'Content-Type': 'application/json' } } + ); + + const ok = check(res, { + 'status 200': (r) => r.status === 200, + }); + + errorRate.add(!ok); + sdkDuration.add(res.timings.duration); +} + +export function setup() { + http.post(`${BASE_URL}/reset`); // clear counters so RPS is correct for this run +} + +export function teardown() { + const res = http.get(`${BASE_URL}/metrics`); + if (res.status === 200) console.log('[WrapperMetrics]', res.body); +} + +export function handleSummary(data) { + const reqs = data.metrics['http_reqs']; + const dur = data.metrics['http_req_duration']; + const failed = data.metrics['http_req_failed']; + const sdkDur = data.metrics['insert_sdk_duration_ms']; + + const totalReqs = reqs?.values?.count ?? 0; + const testDurationMs = data?.state?.testRunDurationMs ?? 0; + const testDurationSec = (testDurationMs / 1000).toFixed(3); + + // RPS = total_requests / total_time_taken (what the user asked for) + const rpsRaw = totalReqs > 0 && testDurationMs > 0 + ? totalReqs / (testDurationMs / 1000) + : 0; + const rps = rpsRaw.toFixed(2); + + // k6's own rate (same formula, shown for cross-check) + const k6RateRaw = reqs?.values?.rate ?? 0; + const k6Rate = k6RateRaw.toFixed(2); + + // Delta between the two — should be <5% if both are correct. + // Divergence happens because testRunDurationMs includes teardown() time. + const delta = Math.abs(rpsRaw - k6RateRaw); + const deltaPct = k6RateRaw > 0 ? ((delta / k6RateRaw) * 100).toFixed(1) : '0.0'; + const match = parseFloat(deltaPct) < 5 ? 'OK (within 5%)' : 'WARN (>5% gap — check teardown latency)'; + + const p50 = (dur?.values?.['p(50)'] ?? 0).toFixed(1); + const p95 = (dur?.values?.['p(95)'] ?? 0).toFixed(1); + const p99 = (dur?.values?.['p(99)'] ?? 0).toFixed(1); + const avgMs = (dur?.values?.avg ?? 0).toFixed(1); + const minMs = (dur?.values?.min ?? 0).toFixed(1); + const maxMs = (dur?.values?.max ?? 
0).toFixed(1); + const errRate = ((failed?.values?.rate ?? 0) * 100).toFixed(2); + const sdkP50 = (sdkDur?.values?.['p(50)'] ?? 0).toFixed(1); + const sdkP95 = (sdkDur?.values?.['p(95)'] ?? 0).toFixed(1); + const sdkP99 = (sdkDur?.values?.['p(99)'] ?? 0).toFixed(1); + + const summary = ` +╔══════════════════════════════════════════════════╗ +║ FIXED-ITERATION INSERT SUMMARY ║ +╠══════════════════════════════════════════════════╣ +║ Config ║ +║ VUs : ${String(VUS).padStart(24)} ║ +║ Target requests : ${String(TOTAL_REQUESTS).padStart(24)} ║ +║ Completed : ${String(totalReqs).padStart(24)} ║ +║ Test duration : ${(testDurationSec + 's').padStart(24)} ║ +╠══════════════════════════════════════════════════╣ +║ Throughput ║ +║ RPS (total/time) : ${String(rps).padStart(24)} ║ +║ RPS (k6 rate) : ${String(k6Rate).padStart(24)} ║ +║ Delta : ${(delta.toFixed(2) + ' (' + deltaPct + '%)').padStart(24)} ║ +║ Verdict : ${String(match).padStart(24)} ║ +║ Error rate : ${(errRate + '%').padStart(24)} ║ +╠══════════════════════════════════════════════════╣ +║ HTTP round-trip latency (ms) ║ +║ min : ${String(minMs).padStart(24)} ║ +║ avg : ${String(avgMs).padStart(24)} ║ +║ p50 : ${String(p50).padStart(24)} ║ +║ p95 : ${String(p95).padStart(24)} ║ +║ p99 : ${String(p99).padStart(24)} ║ +║ max : ${String(maxMs).padStart(24)} ║ +╠══════════════════════════════════════════════════╣ +║ SDK duration (ms) ║ +║ p50 : ${String(sdkP50).padStart(24)} ║ +║ p95 : ${String(sdkP95).padStart(24)} ║ +║ p99 : ${String(sdkP99).padStart(24)} ║ +╚══════════════════════════════════════════════════╝ +`; + console.log(summary); + return { stdout: summary }; +} diff --git a/load-testing/k6/insert.js b/load-testing/k6/insert.js new file mode 100644 index 00000000..7c8be0b1 --- /dev/null +++ b/load-testing/k6/insert.js @@ -0,0 +1,131 @@ +/** + * K6 Load Test: skyflow.vault().bulkInsert() [v3 SDK] + * + * Flow: K6 → WrapperServer /insert → SDK bulkInsert() → EchoServer POST /v2/records/insert + * + * Run: + * 
k6 run load-testing/k6/insert.js + * k6 run --env VUS=100 --env DURATION=120 --env TABLE=persons load-testing/k6/insert.js + * + * Env vars: + * WRAPPER_URL default: http://localhost:8080 + * VUS virtual users default: 50 + * DURATION total seconds default: 120 + * NUM_RECORDS records per call default: 1 + * TABLE vault table default: load_test_table + */ + +import http from 'k6/http'; +import { check } from 'k6'; +import { Rate, Trend } from 'k6/metrics'; + +const BASE_URL = __ENV.WRAPPER_URL || 'http://localhost:8080'; +const VUS = parseInt(__ENV.VUS || '50'); +const DURATION = Math.max(61, parseInt(__ENV.DURATION || '120')); // must be > 60 (30s ramp-up + 30s ramp-down) +const NUM_RECORDS = parseInt(__ENV.NUM_RECORDS || '1'); +const TABLE = __ENV.TABLE || 'load_test_table'; + +const errorRate = new Rate('insert_errors'); +const sdkDuration = new Trend('insert_sdk_duration_ms', true); + +export const options = { + stages: [ + { duration: '30s', target: VUS }, + { duration: `${DURATION - 60}s`, target: VUS }, + { duration: '30s', target: 0 }, + ], + thresholds: { + 'http_req_duration': ['p(95)<500'], + 'http_req_failed': ['rate<0.01'], + 'insert_errors': ['rate<0.01'], + 'insert_sdk_duration_ms': ['p(95)<400'], + }, + summaryTrendStats: ['avg', 'min', 'med', 'max', 'p(50)', 'p(90)', 'p(95)', 'p(99)'], +}; + +export default function () { + const res = http.post( + `${BASE_URL}/insert`, + JSON.stringify({ table: TABLE, num_records: NUM_RECORDS }), + { headers: { 'Content-Type': 'application/json' }, tags: { op: 'insert' } } + ); + + const ok = check(res, { + 'insert: status 200': (r) => r.status === 200, + 'insert: has success/errors': (r) => { + try { + const b = JSON.parse(r.body); + return b.success !== undefined || b.errors !== undefined || r.status === 200; + } catch (_) { return false; } + }, + }); + + errorRate.add(!ok); + sdkDuration.add(res.timings.duration); +} + +// setup() runs once before any VU starts. 
+// Resets WrapperServer counters and records the exact start time. +// Return value is passed as `data` into teardown(). +export function setup() { + http.post(`${BASE_URL}/reset`); + return { startTimeMs: Date.now() }; +} + +// teardown(data) runs once after all VUs finish. +// data.startTimeMs = timestamp from setup() — NOTE: this window spans the whole run, INCLUDING the 30s ramp-up and 30s ramp-down stages. +// RPS = total completed requests / (now - startTimeMs) +export function teardown(data) { + const endTimeMs = Date.now(); + const elapsedSec = (endTimeMs - data.startTimeMs) / 1000; + + const metricsRes = http.get(`${BASE_URL}/metrics`); + if (metricsRes.status !== 200) { + console.log('[teardown] Could not fetch metrics'); + return; + } + + const m = JSON.parse(metricsRes.body); + const totalReqs = m.insert.total; + const rps = (totalReqs / elapsedSec).toFixed(2); + + console.log('[WrapperMetrics]', metricsRes.body); + console.log(`[RPS] total=${totalReqs} elapsed=${elapsedSec.toFixed(1)}s rps=${rps}`); +} + +export function handleSummary(data) { + const reqs = data.metrics['http_reqs']; + const dur = data.metrics['http_req_duration']; + const failed = data.metrics['http_req_failed']; + const sdkDur = data.metrics['insert_sdk_duration_ms']; + + const totalReqs = reqs?.values?.count ?? 0; + const k6Rps = (reqs?.values?.rate ?? 0).toFixed(2); + const p50 = (dur?.values?.['p(50)'] ?? 0).toFixed(1); + const p95 = (dur?.values?.['p(95)'] ?? 0).toFixed(1); + const p99 = (dur?.values?.['p(99)'] ?? 0).toFixed(1); + const avgMs = (dur?.values?.avg ?? 0).toFixed(1); + const errRate = ((failed?.values?.rate ?? 0) * 100).toFixed(2); + const sdkP95 = (sdkDur?.values?.['p(95)'] ??
0).toFixed(1); + + const summary = ` +╔══════════════════════════════════════════╗ +║ K6 INSERT SUMMARY ║ +╠══════════════════════════════════════════╣ +║ Total requests : ${String(totalReqs).padStart(20)} ║ +║ RPS (k6 rate) : ${String(k6Rps).padStart(20)} ║ +║ Error rate : ${(errRate + '%').padStart(20)} ║ +╠══════════════════════════════════════════╣ +║ HTTP latency (ms) ║ +║ avg : ${String(avgMs).padStart(20)} ║ +║ p50 : ${String(p50).padStart(20)} ║ +║ p95 : ${String(p95).padStart(20)} ║ +║ p99 : ${String(p99).padStart(20)} ║ +╠══════════════════════════════════════════╣ +║ SDK duration p95 (ms) ║ +║ p95 : ${String(sdkP95).padStart(20)} ║ +╚══════════════════════════════════════════╝ +`; + console.log(summary); + return { stdout: summary }; +} diff --git a/load-testing/k6/perf.js b/load-testing/k6/perf.js new file mode 100644 index 00000000..fa8d1609 --- /dev/null +++ b/load-testing/k6/perf.js @@ -0,0 +1,128 @@ +/** + * K6 SDK Performance Test — Ramping Arrival Rate (RPS-based) + * + * Workload profile (Linear Step-Up): + * Phase 0 — Baseline : 0 RPS for 1m (cold start memory baseline) + * Phase 1 — Light : 100 RPS for 5m + * Phase 2 — Medium : 500 RPS for 5m + * Phase 3 — High : 1000 RPS for 5m (stress limit) + * Phase 4 — Ramp Down : 500 RPS for 5m + * + * Usage: + * k6 run load-testing/k6/perf.js + * k6 run --env OP=detokenize --env MAX_RPS=500 load-testing/k6/perf.js + * + * Env vars: + * WRAPPER_URL default: http://localhost:8080 + * OP operation: insert | detokenize default: insert + * MAX_RPS peak RPS target default: 1000 + * STEP_DURATION step duration in seconds default: 300 (5m) + * NUM_RECORDS records per bulkInsert call default: 1 + * NUM_TOKENS tokens per bulkDetokenize call default: 1 + * TABLE vault table name default: load_test_table + * TOKEN base token for detokenize default: mock-token-0000-0000-0000-000000000001 + */ + +import http from 'k6/http'; +import { check } from 'k6'; +import { Rate, Trend, Counter } from 'k6/metrics'; + +// -- 
Config ------------------------------------------------------------------ +const BASE_URL = __ENV.WRAPPER_URL || 'http://localhost:8080'; +const OP = __ENV.OP || 'insert'; +const MAX_RPS = parseInt(__ENV.MAX_RPS || '1000'); +const STEP_DURATION = parseInt(__ENV.STEP_DURATION || '300'); // seconds per phase +const NUM_RECORDS = parseInt(__ENV.NUM_RECORDS || '1'); +const NUM_TOKENS = parseInt(__ENV.NUM_TOKENS || '1'); +const TABLE = __ENV.TABLE || 'load_test_table'; +const TOKEN = __ENV.TOKEN || 'mock-token-0000-0000-0000-000000000001'; + +// Derived RPS targets per phase +const LIGHT_RPS = Math.round(MAX_RPS * 0.10); // 10% of peak +const MEDIUM_RPS = Math.round(MAX_RPS * 0.50); // 50% of peak +const HIGH_RPS = MAX_RPS; // 100% of peak (stress) +const DOWN_RPS = Math.round(MAX_RPS * 0.50); // 50% on ramp-down + +// -- Custom Metrics ---------------------------------------------------------- +const errorRate = new Rate('sdk_error_rate'); +const sdkLatency = new Trend('sdk_latency_ms', true); +const sdkRPS = new Counter('sdk_requests_total'); + +// -- Workload Profile -------------------------------------------------------- +export const options = { + scenarios: { + sdk_perf: { + executor: 'ramping-arrival-rate', + startRate: 0, + timeUnit: '1s', + preAllocatedVUs: 200, + maxVUs: 1000, + stages: [ + // Phase 0: Baseline — cold start, 0 RPS + { target: 0, duration: '1m' }, + // Ramp to Phase 1 + { target: LIGHT_RPS, duration: '30s' }, + // Phase 1: Light Load — 10% of peak + { target: LIGHT_RPS, duration: `${STEP_DURATION}s` }, + // Ramp to Phase 2 + { target: MEDIUM_RPS, duration: '1m' }, + // Phase 2: Medium Load — 50% of peak + { target: MEDIUM_RPS, duration: `${STEP_DURATION}s` }, + // Ramp to Phase 3 + { target: HIGH_RPS, duration: '2m' }, + // Phase 3: High Load / Stress — 100% of peak + { target: HIGH_RPS, duration: `${STEP_DURATION}s` }, + // Phase 4: Ramp Down — 50% of peak + { target: DOWN_RPS, duration: '1m' }, + { target: DOWN_RPS, duration: 
`${STEP_DURATION}s` }, + // Cool down + { target: 0, duration: '30s' }, + ], + }, + }, + + thresholds: { + 'http_req_duration': ['p(95)<500', 'p(99)<1000'], + 'http_req_failed': ['rate<0.05'], + 'sdk_error_rate': ['rate<0.05'], + 'sdk_latency_ms': ['p(95)<400', 'p(99)<800'], + }, +}; + +// -- Payload builders -------------------------------------------------------- +function insertPayload() { + return JSON.stringify({ table: TABLE, num_records: NUM_RECORDS }); +} + +function detokenizePayload() { + return JSON.stringify({ token: TOKEN, num_tokens: NUM_TOKENS }); +} + +// -- Main test function ------------------------------------------------------ +export default function () { + const endpoint = OP === 'detokenize' ? '/detokenize' : '/insert'; + const body = OP === 'detokenize' ? detokenizePayload() : insertPayload(); + + const res = http.post(`${BASE_URL}${endpoint}`, body, { + headers: { 'Content-Type': 'application/json' }, + tags: { op: OP }, + }); + + const ok = check(res, { + [`${OP}: status 200`]: (r) => r.status === 200, + [`${OP}: response not empty`]: (r) => r.body && r.body.length > 0, + }); + + errorRate.add(!ok); + sdkLatency.add(res.timings.duration); + sdkRPS.add(1); +} + +// -- Teardown: pull final JVM metrics ---------------------------------------- +export function teardown() { + const res = http.get(`${BASE_URL}/metrics`); + if (res.status === 200) { + console.log('\n[SDK JVM Metrics at teardown]'); + console.log(res.body); + } +} diff --git a/load-testing/node-echo-server/EchoServer.js b/load-testing/node-echo-server/EchoServer.js new file mode 100644 index 00000000..eef7d845 --- /dev/null +++ b/load-testing/node-echo-server/EchoServer.js @@ -0,0 +1,244 @@ +/** + * Skyflow SDK Load Testing - Echo/Mock Server (v3 SDK) — Node.js port + * + * Simulates only the Vault v3 API endpoints that VaultController actually calls: + * POST /v2/records/insert <- bulkInsert() / bulkInsertAsync() + * POST /v2/tokens/detokenize <- bulkDetokenize() / 
bulkDetokenizeAsync() + * + * Usage: + * node EchoServer.js [port] [wait_time_ms] [error_rate_percent] + * + * Examples: + * node EchoServer.js 3015 # defaults: port=3015, wait=0ms, error=0% + * node EchoServer.js 3015 50 # 50 ms simulated latency per request + * node EchoServer.js 3015 50 10 # 50 ms latency + 10 % random 5xx + * + * wait_time_ms / expected_response_code can also be passed per-request: + * - as JSON fields in the request body (e.g. {"wait_time_ms":50,...}) + * - or as query params (?wait_time_ms=50&expected_response_code=500) + */ + +'use strict'; + +const http = require('http'); +const { randomUUID } = require('crypto'); +const { URL } = require('url'); + +// ─── Configuration (set from CLI args) ─────────────────────────────────────── +const PORT = parseInt(process.argv[2]) || 3015; +const DEFAULT_WAIT = parseInt(process.argv[3]) || 0; +const ERROR_RATE = parseInt(process.argv[4]) || 0; + +// ─── Counters ───────────────────────────────────────────────────────────────── +let totalRequests = 0n; +let totalErrors = 0n; + +// ─── Helpers ────────────────────────────────────────────────────────────────── + +function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +function extractQueryParam(query, key) { + if (!query) return null; + const params = new URLSearchParams(query); + return params.has(key) ? params.get(key) : null; +} + +function extractJsonNumber(json, key) { + const m = new RegExp(`"${key}"\\s*:\\s*(\\d+)`).exec(json); + return m ? m[1] : null; +} + +function extractJsonString(json, key) { + const m = new RegExp(`"${key}"\\s*:\\s*"([^"]+)"`).exec(json); + return m ? 
m[1] : null; +} + +function countOccurrences(text, literal) { + let count = 0; + let idx = 0; + while ((idx = text.indexOf(literal, idx)) !== -1) { count++; idx += literal.length; } + return count; +} + +async function simulateLatency(body, query) { + let ms = DEFAULT_WAIT; + const qv = extractQueryParam(query, 'wait_time_ms'); + const bv = extractJsonNumber(body, 'wait_time_ms'); + if (qv !== null) ms = parseInt(qv); + else if (bv !== null) ms = parseInt(bv); + if (ms > 0) await sleep(ms); +} + +function resolveExpectedCode(body, query) { + if (ERROR_RATE > 0 && Math.random() * 100 < ERROR_RATE) return 500; + const qv = extractQueryParam(query, 'expected_response_code'); + const bv = extractJsonNumber(body, 'expected_response_code'); + if (qv !== null) return parseInt(qv); + if (bv !== null) return parseInt(bv); + return 200; +} + +function errorBody(code) { + return JSON.stringify({ error: { http_code: code, message: 'Simulated server error' } }); +} + +function sendJson(res, code, body) { + const bytes = Buffer.from(body, 'utf8'); + res.writeHead(code, { + 'Content-Type': 'application/json', + 'Content-Length': bytes.length, + }); + res.end(bytes); +} + +function readBody(req) { + return new Promise((resolve, reject) => { + const chunks = []; + req.on('data', chunk => chunks.push(chunk)); + req.on('end', () => resolve(Buffer.concat(chunks).toString('utf8'))); + req.on('error', reject); + }); +} + +// ─── Route handlers ─────────────────────────────────────────────────────────── + +/** + * POST /v2/records/insert + * + * SDK request body: + * {"vaultId":"...","tableName":"...","records":[{"data":{"col":"val"},...}],"upsert":{...}} + * + * Expected response: + * {"records":[{"skyflowID":"uuid","tokens":{"mock_field":"tok-XXXXXXXX"},"tableName":"tbl","httpCode":200}]} + */ +async function handleInsert(req, res, body, query) { + await simulateLatency(body, query); + + const code = resolveExpectedCode(body, query); + if (code !== 200) { + totalErrors++; + 
sendJson(res, code, errorBody(code)); + return; + } + + let count = countOccurrences(body, '"data"'); + if (count === 0) count = 1; + const table = extractJsonString(body, 'tableName') || 'load_test_table'; + + const records = []; + for (let i = 0; i < count; i++) { + const id = randomUUID(); + records.push({ + skyflowID: id, + tokens: { mock_field: `tok-${id.slice(0, 8)}` }, + tableName: table, + httpCode: 200, + }); + } + sendJson(res, 200, JSON.stringify({ records })); +} + +/** + * POST /v2/tokens/detokenize + * + * SDK request body: + * {"vaultId":"...","tokens":["tok1","tok2"],"tokenGroupRedactions":[...]} + * + * Expected response: + * {"response":[{"token":"tok1","value":"plain-tok1xx","httpCode":200},...]} + */ +async function handleDetokenize(req, res, body, query) { + await simulateLatency(body, query); + + const code = resolveExpectedCode(body, query); + if (code !== 200) { + totalErrors++; + sendJson(res, code, errorBody(code)); + return; + } + + const tokensMatch = /"tokens"\s*:\s*\[([^\]]+)\]/.exec(body); + const response = []; + + if (tokensMatch) { + const tokenStr = tokensMatch[1]; + const tokenRe = /"([^"]+)"/g; + let m; + while ((m = tokenRe.exec(tokenStr)) !== null) { + const tok = m[1]; + response.push({ + token: tok, + value: `plain-${tok.slice(0, 6)}`, + httpCode: 200, + }); + } + } + + if (response.length === 0) { + response.push({ token: 'mock-token', value: 'mock-plain-value', httpCode: 200 }); + } + + sendJson(res, 200, JSON.stringify({ response })); +} + +/** + * GET /metrics — request counters + process memory stats + */ +function handleMetrics(req, res) { + const mem = process.memoryUsage(); + const usedMb = Math.round(mem.heapUsed / 1024 / 1024); + const maxMb = Math.round(mem.heapTotal / 1024 / 1024); + sendJson(res, 200, JSON.stringify({ + total_requests: Number(totalRequests), + total_errors: Number(totalErrors), + heap_used_mb: usedMb, + heap_total_mb: maxMb, + })); +} + +/** + * GET /health + */ +function handleHealth(req, res) 
{ + sendJson(res, 200, JSON.stringify({ status: 'ok', api: 'v3' })); +} + +// ─── Server ─────────────────────────────────────────────────────────────────── + +const server = http.createServer(async (req, res) => { + totalRequests++; + + const parsed = new URL(req.url, `http://localhost:${PORT}`); + const path = parsed.pathname; + const query = parsed.search ? parsed.search.slice(1) : null; // strip leading '?' + + try { + if (path === '/v2/records/insert' && req.method === 'POST') { + const body = await readBody(req); + await handleInsert(req, res, body, query); + } else if (path === '/v2/tokens/detokenize' && req.method === 'POST') { + const body = await readBody(req); + await handleDetokenize(req, res, body, query); + + } else if (path === '/metrics') { + handleMetrics(req, res); + + } else if (path === '/health') { + handleHealth(req, res); + + } else { + const body = await readBody(req); + await handleInsert(req, res, body, query); + + } + } catch (err) { + console.error('[EchoServer] Error:', err); + sendJson(res, 500, errorBody(500)); + } +}); + +server.listen(PORT, () => { + console.log(`[EchoServer-v3] port=${PORT} wait=${DEFAULT_WAIT}ms error_rate=${ERROR_RATE}%`); +}); diff --git a/load-testing/node-echo-server/package-lock.json b/load-testing/node-echo-server/package-lock.json new file mode 100644 index 00000000..559b32f8 --- /dev/null +++ b/load-testing/node-echo-server/package-lock.json @@ -0,0 +1,827 @@ +{ + "name": "skyflow-mock-server", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "skyflow-mock-server", + "version": "1.0.0", + "dependencies": { + "express": "^4.18.2" + } + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": 
"0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/body-parser": { + "version": "1.20.4", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz", + "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==", + "license": "MIT", + "dependencies": { + "bytes": "~3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "~1.2.0", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "on-finished": "~2.4.1", + "qs": "~6.14.0", + "raw-body": "~2.5.3", + "type-is": "~1.6.18", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + 
"call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz", + "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": 
"MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 
0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express": { + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz", + "integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "~1.20.3", + "content-disposition": "~0.5.4", + "content-type": "~1.0.4", + "cookie": "~0.7.1", + "cookie-signature": "~1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "~1.3.1", + "fresh": "~0.5.2", + "http-errors": "~2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "~2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "~0.1.12", + "proxy-addr": "~2.0.7", + "qs": "~6.14.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "~0.19.0", + "serve-static": "~1.16.2", + "setprototypeof": "1.2.0", + "statuses": "~2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + 
"vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/finalhandler": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz", + "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "~2.4.1", + "parseurl": "~1.3.3", + "statuses": "~2.0.2", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": 
"^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": 
"sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": 
"https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.14.2", 
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", + "integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.3", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz", + "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==", + "license": "MIT", + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/send": { + "version": "0.19.2", + "resolved": 
"https://registry.npmjs.org/send/-/send-0.19.2.tgz", + "integrity": "sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "~0.5.2", + "http-errors": "~2.0.1", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "~2.4.1", + "range-parser": "~1.2.1", + "statuses": "~2.0.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/serve-static": { + "version": "1.16.3", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz", + "integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "~0.19.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": 
{ + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + 
"node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + } + } +} diff --git a/load-testing/node-echo-server/package.json b/load-testing/node-echo-server/package.json new file mode 100644 index 00000000..68323849 --- /dev/null +++ b/load-testing/node-echo-server/package.json @@ -0,0 +1,13 @@ +{ + "name": "skyflow-mock-server", + "version": "1.0.0", + "description": "Mock Skyflow server for SDK load testing", + "main": "server.js", + "scripts": { + "start": "node 
server.js", + "dev": "node --watch server.js" + }, + "dependencies": { + "express": "^4.18.2" + } +} \ No newline at end of file diff --git a/load-testing/node-echo-server/server.js b/load-testing/node-echo-server/server.js new file mode 100644 index 00000000..b481eade --- /dev/null +++ b/load-testing/node-echo-server/server.js @@ -0,0 +1,161 @@ +'use strict'; + +const express = require('express'); +const app = express(); +app.use(express.json()); + +const PORT = process.env.PORT || 3015; + +// Fallback latency/error-code when the request body doesn't specify them. +// Set via docker-compose environment: MOCK_WAIT_MS=50, MOCK_RESPONSE_CODE=200 +const DEFAULT_WAIT_MS = 0; // parseInt(process.env.MOCK_WAIT_MS ?? 0, 10); +const DEFAULT_RESP_CODE = parseInt(process.env.MOCK_RESPONSE_CODE ?? 200, 10); + +// ─── Metrics ──────────────────────────────────────────────────────────────── + +const metrics = { + totalRequests: 0, + insertRequests: 0, + detokenizeRequests: 0, + errorResponses: 0, + totalDelayMs: 0, +}; + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +/** + * Pull __wait_time_ms and __expected_response_code out of the request body. + * Falls back to env-var defaults (MOCK_WAIT_MS / MOCK_RESPONSE_CODE). + */ +function extractParams(body) { + return { + waitMs: parseInt(body.__wait_time_ms ?? DEFAULT_WAIT_MS, 10), + responseCode: parseInt(body.__expected_response_code ?? DEFAULT_RESP_CODE, 10), + }; +} + +/** + * Sleep for ms milliseconds. + */ +function sleep(ms) { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Send a mock error shaped like a real Skyflow error. 
+ */ +function sendError(res, code) { + metrics.errorResponses++; + res.status(code).json({ + error: { + http_code: code, + message: `Mock server returning error ${code}`, + request_id: `mock-req-${Date.now()}`, + details: [], + }, + }); +} + +// ─── Auth ───────────────────────────────────────────────────────────────────── +// SDK calls this first. Always return a token so the SDK doesn't block. + +app.post('/v2/auth/sa/oauth/token', (_req, res) => { + res.json({ + accessToken: 'mock-bearer-token', + tokenType: 'Bearer', + }); +}); + +// ─── Insert ─────────────────────────────────────────────────────────────────── +// POST /v2/vaults/:vaultId/:table +// Works for single (records.length=1) and bulk (records.length=N) — same endpoint. + +app.post('/v2/records/insert', async (req, res) => { + metrics.totalRequests++; + metrics.insertRequests++; + + const { waitMs, responseCode } = extractParams(req.body); + + if (waitMs > 0) { + metrics.totalDelayMs += waitMs; + await sleep(waitMs); + } + + if (responseCode !== 200) { + return sendError(res, responseCode); + } + + const incomingRecords = req.body.records ?? [{}]; + + const records = incomingRecords.map((_, i) => ({ + skyflow_id: `mock-id-${Date.now()}-${i}`, + tokens: { + card_number: `tok-${Date.now()}-${i}`, + }, + })); + + res.json({ records }); +}); + +// ─── Detokenize ─────────────────────────────────────────────────────────────── +// POST /v2/vaults/:vaultId/detokenize +// Works for single and bulk — array size in detokenizationParameters. + +app.post('/v2/vaults/:vaultId/detokenize', async (req, res) => { + metrics.totalRequests++; + metrics.detokenizeRequests++; + + const { waitMs, responseCode } = extractParams(req.body); + + if (waitMs > 0) { + metrics.totalDelayMs += waitMs; + await sleep(waitMs); + } + + if (responseCode !== 200) { + return sendError(res, responseCode); + } + + const params = req.body.detokenizationParameters ?? 
[]; + + const records = params.map((p) => ({ + token: p.token, + value: '4111111111111111', // mock plain-text value + valueType: 'STRING', + })); + + res.json({ records }); +}); + +// ─── Metrics ────────────────────────────────────────────────────────────────── +// Prometheus-style text — Prometheus can scrape this, or you can curl it. + +app.get('/metrics', (_req, res) => { + res.type('text/plain').send( + [ + `mock_total_requests ${metrics.totalRequests}`, + `mock_insert_requests ${metrics.insertRequests}`, + `mock_detokenize_requests ${metrics.detokenizeRequests}`, + `mock_error_responses ${metrics.errorResponses}`, + `mock_total_delay_ms ${metrics.totalDelayMs}`, + ].join('\n') + '\n' + ); +}); + +// ─── Health ─────────────────────────────────────────────────────────────────── + +app.get('/health', (_req, res) => { + res.json({ status: 'ok', uptime: process.uptime() }); +}); + +// ─── Start ──────────────────────────────────────────────────────────────────── + +app.listen(PORT, () => { + console.log(`Mock Skyflow server running on port ${PORT}`); + console.log('Routes:'); + console.log(' POST /v2/auth/sa/oauth/token'); + console.log(' POST /v2/vaults/:vaultId/:table (insert)'); + console.log(' POST /v2/vaults/:vaultId/detokenize (detokenize)'); + console.log(' GET /metrics'); + console.log(' GET /health'); +}); \ No newline at end of file diff --git a/load-testing/perf-run.sh b/load-testing/perf-run.sh new file mode 100755 index 00000000..179a1fe4 --- /dev/null +++ b/load-testing/perf-run.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +# ============================================================================= +# Skyflow Java SDK v3 — SDK Performance Test Orchestrator +# +# Runs the full three-phase performance test (per the SDK perf testing spec): +# Phase 1: 1 CPU / 512 MB RAM +# Phase 2: 2 CPUs / 512 MB RAM +# Phase 3: 4 CPUs / 1 GB RAM +# +# Each phase runs the full step-up workload: +# Baseline (0 RPS) → Light (100) → Medium (500) → High (1000) → 
Ramp-down (500) +# +# Metrics collected per phase: +# External: CPU%, RAM (via docker stats → CSV) +# Internal: Heap, Threads, GC pause time, RPS (via /metrics polling → JSONL) +# k6: Latency p50/p95/p99, error rate, RPS (via k6 JSON output) +# +# Usage: +# ./load-testing/perf-run.sh [insert|detokenize] [max_rps] +# +# Examples: +# ./load-testing/perf-run.sh insert 1000 +# ./load-testing/perf-run.sh detokenize 500 +# +# Output: load-testing/results/YYYY-MM-DD_HH-MM-SS/ +# ============================================================================= +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +COMPOSE_FILE="$SCRIPT_DIR/docker-compose.perf.yml" + +OP="${1:-insert}" +MAX_RPS="${2:-1000}" +STEP_DURATION="${STEP_DURATION:-300}" # seconds per phase (default 5m) +METRICS_POLL_INTERVAL=5 # seconds between /metrics polls +WRAPPER_URL="http://localhost:8080" + +TIMESTAMP="$(date +%Y-%m-%d_%H-%M-%S)" +RESULTS_DIR="$SCRIPT_DIR/results/$TIMESTAMP" +mkdir -p "$RESULTS_DIR" + +# Resource configs per phase +declare -a CPU_LIMITS=("1" "2" "4") +declare -a MEM_LIMITS=("512m" "512m" "1g") +declare -a PHASE_NAMES=("phase1_1cpu_512mb" "phase2_2cpu_512mb" "phase3_4cpu_1gb") + +log() { echo "[perf-run] $*"; } +fail() { echo "[perf-run] ERROR: $*" >&2; exit 1; } + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +start_docker_stats_collector() { + local phase="$1" + local out="$RESULTS_DIR/${phase}_docker_stats.csv" + echo "timestamp,container,cpu_pct,mem_usage,mem_limit,mem_pct,net_in,net_out" > "$out" + # docker stats streams continuously; we append timestamped rows + docker stats --no-trunc --format \ + "{{.Name}},{{.CPUPerc}},{{.MemUsage}},{{.MemPerc}},{{.NetIO}}" \ + skyflow-sut 2>/dev/null | while IFS= read -r line; do + echo "$(date +%s),$line" >> "$out" + done & + echo $! 
+} + +start_jvm_metrics_collector() { + local phase="$1" + local out="$RESULTS_DIR/${phase}_jvm_metrics.jsonl" + ( + while true; do + ts="$(date +%s)" + payload="$(curl -sf "$WRAPPER_URL/metrics" 2>/dev/null || echo '{}')" + echo "{\"ts\":$ts,\"metrics\":$payload}" >> "$out" + sleep "$METRICS_POLL_INTERVAL" + done + ) & + echo $! +} + +stop_collectors() { + local stats_pid="$1" + local jvm_pid="$2" + kill "$stats_pid" "$jvm_pid" 2>/dev/null || true +} + +run_phase() { + local phase_idx="$1" + local cpu="${CPU_LIMITS[$phase_idx]}" + local mem="${MEM_LIMITS[$phase_idx]}" + local phase_name="${PHASE_NAMES[$phase_idx]}" + local k6_out="$RESULTS_DIR/${phase_name}_k6_output.json" + local k6_summary="$RESULTS_DIR/${phase_name}_k6_summary.json" + + log "========================================================" + log "Starting $phase_name (CPU=$cpu MEM=$mem MAX_RPS=$MAX_RPS OP=$OP)" + log "========================================================" + + # Tear down any previous run + docker compose -f "$COMPOSE_FILE" down --remove-orphans 2>/dev/null || true + sleep 2 + + # Start SUT container with phase resource limits + CPU_LIMIT="$cpu" MEM_LIMIT="$mem" \ + OP="$OP" MAX_RPS="$MAX_RPS" STEP_DURATION="$STEP_DURATION" \ + docker compose -f "$COMPOSE_FILE" up -d sut + + # Wait for SUT to be healthy + log "Waiting for SUT to become healthy..." + for i in $(seq 1 30); do + curl -sf "$WRAPPER_URL/health" > /dev/null 2>&1 && break + sleep 2 + done + curl -sf "$WRAPPER_URL/health" > /dev/null || fail "SUT never became healthy" + log "SUT is healthy." + + # Start background collectors + STATS_PID="$(start_docker_stats_collector "$phase_name")" + JVM_PID="$(start_jvm_metrics_collector "$phase_name")" + log "Collectors started (docker-stats PID=$STATS_PID, jvm-poll PID=$JVM_PID)" + + # Run k6 directly (not via docker compose) so output lands locally + log "Running k6 perf test..." 
+ k6 run \ + --env "WRAPPER_URL=$WRAPPER_URL" \ + --env "OP=$OP" \ + --env "MAX_RPS=$MAX_RPS" \ + --env "STEP_DURATION=$STEP_DURATION" \ + --out "json=$k6_out" \ + --summary-export "$k6_summary" \ + "$SCRIPT_DIR/k6/perf.js" || true # don't abort on threshold breach + + stop_collectors "$STATS_PID" "$JVM_PID" + log "$phase_name complete. Results saved to $RESULTS_DIR/" + + docker compose -f "$COMPOSE_FILE" down 2>/dev/null || true + sleep 3 +} + +print_summary() { + log "" + log "============================================================" + log "PERFORMANCE TEST COMPLETE — Results: $RESULTS_DIR" + log "============================================================" + log "" + log "Files generated:" + for f in "$RESULTS_DIR"/*; do + log " $(basename "$f")" + done + log "" + log "Quick summary (p95 latency per phase):" + for phase_name in "${PHASE_NAMES[@]}"; do + local summary="$RESULTS_DIR/${phase_name}_k6_summary.json" + if [[ -f "$summary" ]]; then + p95=$(python3 -c " +import json, sys +d = json.load(open('$summary')) +v = d.get('metrics',{}).get('http_req_duration',{}).get('values',{}).get('p(95)', 'N/A') +print(v) +" 2>/dev/null || echo "N/A") + log " $phase_name → p95 = ${p95}ms" + fi + done +} + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- +log "SDK Performance Test" +log " Operation : $OP" +log " Max RPS : $MAX_RPS" +log " Step length: ${STEP_DURATION}s per phase" +log " Results : $RESULTS_DIR" +log "" + +# Check prerequisites +command -v docker > /dev/null || fail "docker not found" +command -v k6 > /dev/null || fail "k6 not found" + +# Run all three phases +for i in 0 1 2; do + run_phase "$i" +done + +print_summary diff --git a/load-testing/poll-metrics.sh b/load-testing/poll-metrics.sh new file mode 100755 index 00000000..c4dae2c9 --- /dev/null +++ b/load-testing/poll-metrics.sh @@ -0,0 +1,124 @@ +#!/usr/bin/env bash +# 
============================================================================= +# poll-metrics.sh — Continuously poll WrapperServer /metrics and save to file +# +# Usage: +# ./load-testing/poll-metrics.sh # defaults +# INTERVAL=2 ./load-testing/poll-metrics.sh # poll every 2s +# WRAPPER_PORT=9090 ./load-testing/poll-metrics.sh # different port +# +# Environment variables: +# WRAPPER_PORT WrapperServer port (default: 8080) +# INTERVAL Seconds between polls (default: 5) +# RESULTS_DIR Output directory (default: load-testing/results) +# ============================================================================= +set -euo pipefail + +WRAPPER_PORT="${WRAPPER_PORT:-8080}" +INTERVAL="${INTERVAL:-5}" + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +RESULTS_DIR="${RESULTS_DIR:-$SCRIPT_DIR/results}" + +mkdir -p "$RESULTS_DIR" + +TIMESTAMP="$(date '+%Y-%m-%d_%H-%M-%S')" +OUTPUT_FILE="$RESULTS_DIR/metrics-${TIMESTAMP}.txt" + +# Track peak values across all snapshots +PEAK_INSERT_RPS="0" +PEAK_DETOKENIZE_RPS="0" +PEAK_THREADS="0" +SNAPSHOT_COUNT=0 + +echo "[poll-metrics] Polling http://localhost:${WRAPPER_PORT}/metrics every ${INTERVAL}s" +echo "[poll-metrics] Saving to: $OUTPUT_FILE" +echo "[poll-metrics] Press Ctrl-C to stop." 
+echo "" + +# Extract a numeric field from JSON using python3 +extract() { + local json="$1" field="$2" + python3 -c " +import json, sys +try: + d = json.loads('''$json''') + keys = '$field'.split('.') + v = d + for k in keys: v = v[k] + print(v) +except: print(0) +" 2>/dev/null || echo "0" +} + +# Return the larger of two numbers (supports decimals) +max_of() { + python3 -c "a,b=float('$1'),float('$2'); print(a if a>b else b)" 2>/dev/null || echo "$1" +} + +pretty_print() { + if command -v python3 &>/dev/null; then + python3 -m json.tool 2>/dev/null || cat + else + cat + fi +} + +cleanup() { + echo "" + echo "================================================================" + echo " PEAK METRICS SUMMARY ($OUTPUT_FILE)" + echo "================================================================" + echo " Snapshots collected : $SNAPSHOT_COUNT" + echo " Peak insert RPS : $PEAK_INSERT_RPS" + echo " Peak detokenize RPS : $PEAK_DETOKENIZE_RPS" + echo " Peak threads : $PEAK_THREADS" + echo "================================================================" + echo "" + + # Also append the summary to the results file + { + echo "" + echo "================================================================" + echo " PEAK METRICS SUMMARY" + echo "================================================================" + echo " Snapshots collected : $SNAPSHOT_COUNT" + echo " Peak insert RPS : $PEAK_INSERT_RPS" + echo " Peak detokenize RPS : $PEAK_DETOKENIZE_RPS" + echo " Peak threads : $PEAK_THREADS" + echo "================================================================" + } >> "$OUTPUT_FILE" + + echo "[poll-metrics] Results saved to: $OUTPUT_FILE" +} +trap cleanup EXIT INT TERM + +while true; do + TS="$(date '+%Y-%m-%d %H:%M:%S')" + SNAPSHOT="$(curl -sf "http://localhost:${WRAPPER_PORT}/metrics" 2>/dev/null || echo '{"error":"metrics endpoint unreachable"}')" + PRETTY="$(echo "$SNAPSHOT" | pretty_print)" + + # Extract current values + CUR_INSERT_RPS="$(extract "$SNAPSHOT" 'insert.rps')" 
+ CUR_DETOKENIZE_RPS="$(extract "$SNAPSHOT" 'detokenize.rps')" + CUR_THREADS="$(extract "$SNAPSHOT" 'jvm.threads_current')" + + # Update peaks + PEAK_INSERT_RPS="$(max_of "$PEAK_INSERT_RPS" "$CUR_INSERT_RPS")" + PEAK_DETOKENIZE_RPS="$(max_of "$PEAK_DETOKENIZE_RPS" "$CUR_DETOKENIZE_RPS")" + PEAK_THREADS="$(max_of "$PEAK_THREADS" "$CUR_THREADS")" + + SNAPSHOT_COUNT=$((SNAPSHOT_COUNT + 1)) + + ENTRY="[${TS}] insert_rps=${CUR_INSERT_RPS} threads=${CUR_THREADS} +${PRETTY} +" + + # Append to file + printf '%s\n' "$ENTRY" >> "$OUTPUT_FILE" + + # Print to stdout + printf '%s\n' "$ENTRY" + + sleep "$INTERVAL" +done diff --git a/load-testing/run-samples.sh b/load-testing/run-samples.sh new file mode 100755 index 00000000..62adf743 --- /dev/null +++ b/load-testing/run-samples.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash +# ============================================================================= +# Skyflow Java SDK v3 — Sample Runner +# +# Starts the EchoServer (mock vault), then runs one or all SDK samples, +# capturing metrics output to a timestamped results file. 
+# +# Usage: +# ./load-testing/run-samples.sh [sample] [options] +# +# Samples: +# all Run all samples + print comparison table (default) +# insert InsertSample — sync bulk insert +# detokenize DetokenizeSample — sync bulk detokenize +# async-insert AsyncInsertSample — concurrent async insert +# async-detokenize AsyncDetokenizeSample — concurrent async detokenize +# concurrent ConcurrentSample — all 4 concurrent patterns +# retry RetryOnFailureSample — retry on partial failure +# benchmark BenchmarkSample — sustained load across 3 concurrency tiers +# +# Examples: +# ./load-testing/run-samples.sh +# ./load-testing/run-samples.sh async-insert +# ./load-testing/run-samples.sh all +# ECHO_WAIT_MS=50 ./load-testing/run-samples.sh concurrent +# ./load-testing/run-samples.sh benchmark +# BENCH_DURATION=60 BENCH_OP=detokenize ./load-testing/run-samples.sh benchmark +# +# Environment variables: +# ECHO_PORT EchoServer port (default: 3015) +# ECHO_WAIT_MS Simulated vault latency ms (default: 0) +# ECHO_ERR_PCT Random error rate % (default: 0) +# SAVE_RESULTS Save output to file (default: true) +# BENCH_DURATION Benchmark seconds per tier (default: 30) +# BENCH_OP Benchmark operation (default: insert) +# BENCH_BATCH Records/tokens per SDK call (default: 1) +# ============================================================================= +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +ECHO_PORT="${ECHO_PORT:-3015}" +ECHO_WAIT_MS="${ECHO_WAIT_MS:-0}" +ECHO_ERR_PCT="${ECHO_ERR_PCT:-0}" +SAVE_RESULTS="${SAVE_RESULTS:-true}" +BENCH_DURATION="${BENCH_DURATION:-30}" +BENCH_OP="${BENCH_OP:-insert}" +BENCH_BATCH="${BENCH_BATCH:-1}" + +SAMPLE="${1:-all}" + +TIMESTAMP="$(date +%Y-%m-%d_%H-%M-%S)" +RESULTS_DIR="$SCRIPT_DIR/results" +mkdir -p "$RESULTS_DIR" +RESULTS_FILE="$RESULTS_DIR/samples-${SAMPLE}-${TIMESTAMP}.txt" + +ECHO_PID="" + +# --------------------------------------------------------------------------- +# Cleanup +# --------------------------------------------------------------------------- +cleanup() { + echo "" + echo "[run-samples] Stopping EchoServer..." + [ -n "$ECHO_PID" ] && kill "$ECHO_PID" 2>/dev/null || true + wait 2>/dev/null || true + echo "[run-samples] Done." +} +trap cleanup EXIT INT TERM + +# --------------------------------------------------------------------------- +# Map sample name → main class +# --------------------------------------------------------------------------- +sample_class() { + case "$1" in + all) echo "com.skyflow.loadtest.samples.RunAllSamples" ;; + insert) echo "com.skyflow.loadtest.samples.InsertSample" ;; + detokenize) echo "com.skyflow.loadtest.samples.DetokenizeSample" ;; + async-insert) echo "com.skyflow.loadtest.samples.AsyncInsertSample" ;; + async-detokenize) echo "com.skyflow.loadtest.samples.AsyncDetokenizeSample" ;; + concurrent) echo "com.skyflow.loadtest.samples.ConcurrentSample" ;; + retry) echo "com.skyflow.loadtest.samples.RetryOnFailureSample" ;; + benchmark) echo "com.skyflow.loadtest.samples.BenchmarkSample" ;; + *) + echo "[run-samples] Unknown sample '$1'." >&2 + echo " Valid: all | insert | detokenize | async-insert | async-detokenize | concurrent | retry" >&2 + exit 1 + ;; + esac +} + +MAIN_CLASS="$(sample_class "$SAMPLE")" + +# --------------------------------------------------------------------------- +# 1. 
Build wrapper (compile only — fast) +# --------------------------------------------------------------------------- +echo "[run-samples] Compiling wrapper..." +mvn compile \ + -f "$SCRIPT_DIR/wrapper/pom.xml" \ + -Dgpg.skip=true \ + -q +echo "[run-samples] Compile done." + +# --------------------------------------------------------------------------- +# 2. Compile and start EchoServer +# --------------------------------------------------------------------------- +echo "[run-samples] Compiling EchoServer..." +javac -d "$SCRIPT_DIR/echo-server/" "$SCRIPT_DIR/echo-server/EchoServer.java" + +# Kill any stale process on the port before starting +if lsof -ti :"$ECHO_PORT" > /dev/null 2>&1; then + echo "[run-samples] Port $ECHO_PORT in use — killing stale process..." + lsof -ti :"$ECHO_PORT" | xargs kill -9 2>/dev/null || true + sleep 1 +fi + +echo "[run-samples] Starting EchoServer on :$ECHO_PORT (wait=${ECHO_WAIT_MS}ms err=${ECHO_ERR_PCT}%)..." +java -cp "$SCRIPT_DIR/echo-server" EchoServer "$ECHO_PORT" "$ECHO_WAIT_MS" "$ECHO_ERR_PCT" & +ECHO_PID=$! +sleep 2 + +curl -sf "http://localhost:$ECHO_PORT/health" > /dev/null \ + || { echo "[run-samples] ERROR: EchoServer did not start"; exit 1; } +echo "[run-samples] EchoServer running (pid=$ECHO_PID)." + +# --------------------------------------------------------------------------- +# 3. 
Run sample +# --------------------------------------------------------------------------- +echo "" +echo "[run-samples] ===== Running: $SAMPLE ($MAIN_CLASS) =====" +echo "" + +run_sample() { + mvn exec:java \ + -f "$SCRIPT_DIR/wrapper/pom.xml" \ + -Dexec.mainClass="$MAIN_CLASS" \ + -Dbench.duration="$BENCH_DURATION" \ + -Dbench.op="$BENCH_OP" \ + -Dbench.batch="$BENCH_BATCH" \ + -Dgpg.skip=true \ + 2>/dev/null +} + +if [ "$SAVE_RESULTS" = "true" ]; then + run_sample | tee "$RESULTS_FILE" + echo "" + echo "[run-samples] Results saved to: $RESULTS_FILE" +else + run_sample +fi diff --git a/load-testing/run.sh b/load-testing/run.sh new file mode 100755 index 00000000..67cec1a9 --- /dev/null +++ b/load-testing/run.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash +# ============================================================================= +# Skyflow Java SDK v3 Load Testing - Orchestration Script +# +# Usage: +# ./load-testing/run.sh [insert|detokenize|all|benchmark] [extra k6 flags] +# +# Examples: +# ./load-testing/run.sh insert +# ./load-testing/run.sh detokenize --env VUS=100 --env NUM_TOKENS=5 +# ./load-testing/run.sh all --env DURATION=180 +# ./load-testing/run.sh benchmark +# BENCH_DURATION=60 BENCH_OP=detokenize ./load-testing/run.sh benchmark +# +# Environment variables (override defaults): +# ECHO_PORT Echo server port (default: 3015) +# WRAPPER_PORT Wrapper server port (default: 8080) +# ECHO_WAIT_MS Simulated vault latency ms (default: 0) +# ECHO_ERR_PCT Random error rate % (default: 0) +# VAULT_ID Vault ID for SDK (default: mock-vault-id) +# BENCH_DURATION Benchmark seconds per tier (default: 30) +# BENCH_OP Benchmark operation (default: insert) +# BENCH_BATCH Records/tokens per SDK call (default: 1) +# ============================================================================= +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +ECHO_PORT="${ECHO_PORT:-3015}" +WRAPPER_PORT="${WRAPPER_PORT:-8080}" +ECHO_WAIT_MS="${ECHO_WAIT_MS:-0}" +ECHO_ERR_PCT="${ECHO_ERR_PCT:-0}" +VAULT_ID="${VAULT_ID:-mock-vault-id}" +BENCH_DURATION="${BENCH_DURATION:-30}" +BENCH_OP="${BENCH_OP:-insert}" +BENCH_BATCH="${BENCH_BATCH:-1}" +TEST="${1:-all}" +shift || true # remaining args passed through to k6 + +ECHO_PID="" +WRAPPER_PID="" +POLLER_PID="" + +cleanup() { + echo "" + echo "[run.sh] Stopping servers..." + [ -n "$POLLER_PID" ] && kill "$POLLER_PID" 2>/dev/null || true + [ -n "$ECHO_PID" ] && kill "$ECHO_PID" 2>/dev/null || true + [ -n "$WRAPPER_PID" ] && kill "$WRAPPER_PID" 2>/dev/null || true + wait 2>/dev/null || true + echo "[run.sh] Done." +} +trap cleanup EXIT INT TERM + +# --------------------------------------------------------------------------- +# 1. Install v3 SDK to local Maven repo (idempotent) +# --------------------------------------------------------------------------- +echo "[run.sh] Installing v3 SDK to local Maven repo..." +mvn clean install +echo "[run.sh] v3 SDK installed." + +# --------------------------------------------------------------------------- +# 2. Build wrapper fat jar +# --------------------------------------------------------------------------- +echo "[run.sh] Building wrapper fat jar..." +mvn package -f "$SCRIPT_DIR/wrapper/pom.xml" -DskipTests -q +WRAPPER_JAR="$SCRIPT_DIR/wrapper/target/skyflow-load-test-wrapper-1.0.0.jar" +echo "[run.sh] Wrapper built: $WRAPPER_JAR" + +# --------------------------------------------------------------------------- +# 3. Compile echo server (single-file, no Maven needed) +# --------------------------------------------------------------------------- +echo "[run.sh] Compiling echo server..." +javac -d "$SCRIPT_DIR/echo-server/" "$SCRIPT_DIR/echo-server/EchoServer.java" +echo "[run.sh] Echo server compiled." + +# --------------------------------------------------------------------------- +# 4. 
Start echo server +# --------------------------------------------------------------------------- +echo "[run.sh] Starting EchoServer on port $ECHO_PORT (wait=${ECHO_WAIT_MS}ms, err=${ECHO_ERR_PCT}%)..." +java -cp "$SCRIPT_DIR/echo-server" EchoServer "$ECHO_PORT" "$ECHO_WAIT_MS" "$ECHO_ERR_PCT" & +ECHO_PID=$! +sleep 2 + +curl -sf "http://localhost:$ECHO_PORT/health" > /dev/null \ + || { echo "[run.sh] ERROR: Echo server did not start"; exit 1; } +echo "[run.sh] Echo server running (pid=$ECHO_PID)." + +# --------------------------------------------------------------------------- +# 5. Start wrapper server +# --------------------------------------------------------------------------- +echo "[run.sh] Starting WrapperServer on port $WRAPPER_PORT..." +VAULT_ID="$VAULT_ID" \ +VAULT_URL="http://localhost:$ECHO_PORT" \ +WRAPPER_PORT="$WRAPPER_PORT" \ +java -jar "$WRAPPER_JAR" & +WRAPPER_PID=$! +sleep 3 + +curl -sf "http://localhost:$WRAPPER_PORT/health" > /dev/null \ + || { echo "[run.sh] ERROR: Wrapper server did not start"; exit 1; } +echo "[run.sh] Wrapper server running (pid=$WRAPPER_PID)." + +# --------------------------------------------------------------------------- +# 6. Start metrics poller in background +# --------------------------------------------------------------------------- +echo "[run.sh] Starting metrics poller (INTERVAL=${INTERVAL:-5}s)..." +WRAPPER_PORT="$WRAPPER_PORT" RESULTS_DIR="$SCRIPT_DIR/results" \ + bash "$SCRIPT_DIR/poll-metrics.sh" & +POLLER_PID=$! +echo "[run.sh] Metrics poller running (pid=$POLLER_PID)." + +# --------------------------------------------------------------------------- +# 7. 
Run k6 test(s) +# --------------------------------------------------------------------------- +run_k6() { + local script="$1"; shift + echo "" + echo "[run.sh] ===== Running k6: $script =====" + k6 run \ + --env "WRAPPER_URL=http://localhost:$WRAPPER_PORT" \ + "$@" \ + "$SCRIPT_DIR/k6/$script" +} + +case "$TEST" in + insert) + run_k6 insert.js "$@" + ;; + detokenize) + run_k6 detokenize.js "$@" + ;; + all) + run_k6 insert.js "$@" + run_k6 detokenize.js "$@" + ;; + benchmark) + echo "" + echo "[run.sh] ===== Running SDK Benchmark (op=${BENCH_OP} duration=${BENCH_DURATION}s batch=${BENCH_BATCH}) =====" + mvn compile exec:java \ + -f "$SCRIPT_DIR/wrapper/pom.xml" \ + -Dexec.mainClass="com.skyflow.loadtest.samples.BenchmarkSample" \ + -Dbench.duration="$BENCH_DURATION" \ + -Dbench.op="$BENCH_OP" \ + -Dbench.batch="$BENCH_BATCH" \ + -Dgpg.skip=true \ + 2>/dev/null + ;; + *) + echo "[run.sh] Unknown test '$TEST'. Use: insert | detokenize | all | benchmark" + exit 1 + ;; +esac + +echo "" +echo "[run.sh] All tests completed." 
diff --git a/load-testing/wrapper/pom.xml b/load-testing/wrapper/pom.xml new file mode 100644 index 00000000..c8ce72ff --- /dev/null +++ b/load-testing/wrapper/pom.xml @@ -0,0 +1,62 @@ + + + 4.0.0 + + com.skyflow + skyflow-load-test-wrapper + 1.0.0 + jar + + + 8 + 8 + UTF-8 + + + + + com.skyflow + skyflow-java + 3.0.0-beta.9 + + + com.googlecode.json-simple + json-simple + 1.1.1 + + + + + + + + org.codehaus.mojo + exec-maven-plugin + 3.1.0 + + + + + org.apache.maven.plugins + maven-shade-plugin + 3.2.4 + + + package + shade + + + + com.skyflow.loadtest.WrapperServer + + + false + + + + + + + diff --git a/load-testing/wrapper/src/main/java/com/skyflow/loadtest/WrapperServer.java b/load-testing/wrapper/src/main/java/com/skyflow/loadtest/WrapperServer.java new file mode 100644 index 00000000..8e5006d1 --- /dev/null +++ b/load-testing/wrapper/src/main/java/com/skyflow/loadtest/WrapperServer.java @@ -0,0 +1,414 @@ +package com.skyflow.loadtest; + +/** + * Skyflow SDK Load Testing - Wrapper Server (v3 SDK) + * + * HTTP server that wraps the Skyflow Java v3 SDK. + * K6 hits this server → this server calls the SDK → SDK hits EchoServer. + * + * Configuration (environment variables): + * VAULT_ID Skyflow vault ID (default: mock-vault-id) + * VAULT_URL Echo server base URL (default: http://localhost:3015) + * WRAPPER_PORT Port this server listens (default: 8080) + * API_KEY Static API key for auth (default: mock-api-key) + * + * Endpoints exposed to K6: + * POST /insert -> skyflow.vault().bulkInsert() + * POST /detokenize -> skyflow.vault().bulkDetokenize() + * GET /health -> liveness check + * GET /metrics -> JVM + SDK call counters + * + * Optional request body fields (all endpoints): + * { "table": "my_table", "num_records": 3, "token": "tok-abc" } + * + * Build: + * # 1. Install v3 SDK to local Maven repo + * mvn install -f v3/pom.xml -DskipTests + * # 2. 
Build wrapper fat jar + * mvn package -f load-testing/wrapper/pom.xml + * + * Run: + * VAULT_URL=http://localhost:3015 \ + * java -jar load-testing/wrapper/target/skyflow-load-test-wrapper-1.0.0.jar + */ + +import com.skyflow.Skyflow; +import com.skyflow.config.Credentials; +import com.skyflow.config.VaultConfig; +import com.skyflow.enums.LogLevel; +import com.skyflow.errors.SkyflowException; +import com.skyflow.vault.data.DetokenizeRequest; +import com.skyflow.vault.data.InsertRecord; +import com.skyflow.vault.data.InsertRequest; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadMXBean; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; + +public class WrapperServer { + + // -- Configuration ------------------------------------------------------- + static final String VAULT_ID = env("VAULT_ID", "mock-vault-id"); + static final String VAULT_URL = env("VAULT_URL", "http://localhost:3015"); + static final int PORT = Integer.parseInt(env("WRAPPER_PORT", "8080")); +// static final String API_KEY = env("API_KEY", "mock-api-key"); + static final String TOKEN = ""; + // Default test data (overridable per-request) + static final String DEFAULT_TABLE = "load_test_table"; + static final String DEFAULT_TOKEN = "mock-token-0000-0000-0000-000000000001"; + + // -- Counters 
------------------------------------------------------------ + static final AtomicLong reqTotal = new AtomicLong(); + static final AtomicLong reqSuccess = new AtomicLong(); + static final AtomicLong reqError = new AtomicLong(); + + // -- Per-operation latency tracking -------------------------------------- + static final OpMetrics insertMetrics = new OpMetrics(); + static final OpMetrics detokenizeMetrics = new OpMetrics(); + + /** + * + * Thread-safe per-operation metrics: latency list + counters + wall-clock start. + * Latency list is bounded to the last 100 000 samples to avoid unbounded growth. + */ + static class OpMetrics { + final AtomicLong total = new AtomicLong(); + final AtomicLong success = new AtomicLong(); + final AtomicLong error = new AtomicLong(); + final List latencies = Collections.synchronizedList(new ArrayList<>()); + static final int MAX_SAMPLES = 100_000; + + // firstCallTime: set exactly once (CAS); lastCallTime: updated on every call. + final AtomicLong firstCallTime = new AtomicLong(0); + final AtomicLong lastCallTime = new AtomicLong(0); + + long startCall() { return System.currentTimeMillis(); } + + void record(long startMs, boolean ok) { + long now = System.currentTimeMillis(); + long latency = now - startMs; + total.incrementAndGet(); + if (ok) success.incrementAndGet(); else error.incrementAndGet(); + synchronized (latencies) { + if (latencies.size() < MAX_SAMPLES) latencies.add(latency); + } + firstCallTime.compareAndSet(0, startMs); // set once, race-free + lastCallTime.set(now); + } + + void reset() { + total.set(0); + success.set(0); + error.set(0); + synchronized (latencies) { latencies.clear(); } + firstCallTime.set(0); + lastCallTime.set(0); + } + + /** Return a sorted copy for percentile calculation. 
*/ + long[] sortedLatencies() { + long[] arr; + synchronized (latencies) { + arr = new long[latencies.size()]; + for (int i = 0; i < arr.length; i++) arr[i] = latencies.get(i); + } + java.util.Arrays.sort(arr); + return arr; + } + + long percentile(long[] sorted, double pct) { + if (sorted.length == 0) return 0; + int idx = (int) Math.ceil(pct / 100.0 * sorted.length) - 1; + return sorted[Math.max(0, Math.min(idx, sorted.length - 1))]; + } + + long avg(long[] sorted) { + if (sorted.length == 0) return 0; + long sum = 0; + for (long v : sorted) sum += v; + return sum / sorted.length; + } + + double rps() { + long first = firstCallTime.get(); + if (first == 0) return 0; // no calls yet + long last = lastCallTime.get(); + long now = System.currentTimeMillis(); + // Use 'now' while test is running; use lastCallTime after test ends. + // Test is considered running if the last call was within the past 2s. + long end = (now - last < 2000) ? now : last; + double elapsedSec = (end - first) / 1000.0; + return elapsedSec > 0 ? 
total.get() / elapsedSec : 0; + } + + String toJson(String op) { + long[] s = sortedLatencies(); + return String.format( + "\"%s\":{\"total\":%d,\"success\":%d,\"error\":%d," + + "\"rps\":%.1f,\"avg_ms\":%d,\"p50_ms\":%d,\"p95_ms\":%d,\"p99_ms\":%d}", + op, total.get(), success.get(), error.get(), + rps(), avg(s), percentile(s, 50), percentile(s, 95), percentile(s, 99)); + } + } + + // -- Shared SDK client --------------------------------------------------- + static Skyflow skyflowClient; + + public static void main(String[] args) throws Exception { + Credentials credentials = new Credentials(); + credentials.setToken(TOKEN); + + VaultConfig config = new VaultConfig(); + config.setVaultId(VAULT_ID); + config.setVaultURL(VAULT_URL); + config.setCredentials(credentials); + + skyflowClient = Skyflow.builder() + .setLogLevel(LogLevel.ERROR) + .addVaultConfig(config) + .build(); + + int httpThreads = Integer.parseInt(env("HTTP_THREADS", "200")); + HttpServer server = HttpServer.create(new InetSocketAddress(PORT), 0); + server.createContext("/insert", new InsertHandler()); + server.createContext("/detokenize", new DetokenizeHandler()); + server.createContext("/health", new HealthHandler()); + server.createContext("/metrics", new MetricsHandler()); + server.createContext("/reset", new ResetHandler()); + server.setExecutor(Executors.newFixedThreadPool(httpThreads)); + server.start(); + + System.out.printf("[WrapperServer-v3] port=%d vault=%s echo=%s%n", + PORT, VAULT_ID, VAULT_URL); + } + + // ========================================================================= + // POST /insert -> skyflow.vault().bulkInsert() + // ========================================================================= + static class InsertHandler implements HttpHandler { + @Override + public void handle(HttpExchange ex) throws IOException { + reqTotal.incrementAndGet(); + JSONObject params = parseBody(ex); + String table = str(params, "table", DEFAULT_TABLE); + int numRecords = 10000; + /// (int) 
longVal(params, "num_records", 1); + + ArrayList records = new ArrayList<>(); + for (int i = 0; i < numRecords; i++) { + Map data = new HashMap<>(); + data.put("mock_field", "load-test-" + i + "-" + System.currentTimeMillis()); + InsertRecord rec = InsertRecord.builder() +// .table(table) + .data(data) + .build(); + records.add(rec); + } + + InsertRequest request = InsertRequest.builder() + .table(table) + .records(records) + .build(); + + long t = insertMetrics.startCall(); + try { + com.skyflow.vault.data.InsertResponse response = + skyflowClient.vault().bulkInsert(request); + boolean ok = response.getErrors() == null || response.getErrors().isEmpty(); + insertMetrics.record(t, ok); + if (ok) { reqSuccess.incrementAndGet(); } else { reqError.incrementAndGet(); } + sendJson(ex, 200, toJson(response)); + } catch (Exception e) { + insertMetrics.record(t, false); + reqError.incrementAndGet(); + sendJson(ex, 500, errorJson(e.getMessage())); + } + } + } + + // ========================================================================= + // POST /detokenize -> skyflow.vault().bulkDetokenize() + // ========================================================================= + static class DetokenizeHandler implements HttpHandler { + public void handle(HttpExchange ex) throws IOException { + reqTotal.incrementAndGet(); + JSONObject params = parseBody(ex); + String token = str(params, "token", DEFAULT_TOKEN); + int numTokens = (int) longVal(params, "num_tokens", 1); + + List tokens = new ArrayList<>(); + for (int i = 0; i < numTokens; i++) { + tokens.add(token + "-" + i); + } + + DetokenizeRequest request = DetokenizeRequest.builder() + .tokens(tokens) + .build(); + + long t = detokenizeMetrics.startCall(); + try { + com.skyflow.vault.data.DetokenizeResponse response = + skyflowClient.vault().bulkDetokenize(request); + boolean ok = response.getErrors() == null || response.getErrors().isEmpty(); + detokenizeMetrics.record(t, ok); + if (ok) { reqSuccess.incrementAndGet(); } else 
{ reqError.incrementAndGet(); } + sendJson(ex, 200, toJson(response)); + } catch (Exception e) { + detokenizeMetrics.record(t, false); + reqError.incrementAndGet(); + sendJson(ex, 500, errorJson(e.getMessage())); + } + } + } + + // ========================================================================= + // Utility handlers + // ========================================================================= + + static class HealthHandler implements HttpHandler { + public void handle(HttpExchange ex) throws IOException { + sendJson(ex, 200, + "{\"status\":\"ok\",\"sdk\":\"v3\",\"vault_id\":\"" + VAULT_ID + + "\",\"vault_url\":\"" + VAULT_URL + "\"}"); + } + } + + /** POST /reset — clears all counters and timestamps so RPS is correct across multiple runs. */ + static class ResetHandler implements HttpHandler { + public void handle(HttpExchange ex) throws IOException { + reqTotal.set(0); + reqSuccess.set(0); + reqError.set(0); + insertMetrics.reset(); + detokenizeMetrics.reset(); + sendJson(ex, 200, "{\"status\":\"reset\"}"); + } + } + + /** + * GET /metrics — JVM heap, GC stats, thread count, SDK call counters, + * and per-operation latency metrics (p50/p95/p99/avg, RPS). + * + * Response shape: + * { + * "sdk_calls": { "total", "success", "error" }, + * "insert": { "total", "success", "error", "rps", "avg_ms", "p50_ms", "p95_ms", "p99_ms" }, + * "detokenize": { ... same fields ... 
}, + * "jvm": { "heap_used_mb", "heap_total_mb", "heap_max_mb", + * "threads_current", "threads_peak", + * "threads_total_started", "threads_daemon", + * "gc_count", "gc_time_ms" } + * } + */ + static class MetricsHandler implements HttpHandler { + public void handle(HttpExchange ex) throws IOException { + Runtime rt = Runtime.getRuntime(); + long usedMb = (rt.totalMemory() - rt.freeMemory()) / (1024 * 1024); + long totalMb = rt.totalMemory() / (1024 * 1024); + long maxMb = rt.maxMemory() / (1024 * 1024); + + ThreadMXBean tmx = ManagementFactory.getThreadMXBean(); + int threadsCurrent = tmx.getThreadCount(); + int threadsDaemon = tmx.getDaemonThreadCount(); + int threadsPeak = tmx.getPeakThreadCount(); + long threadsTotalStarted = tmx.getTotalStartedThreadCount(); + + long gcCount = 0, gcTimeMs = 0; + List gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); + for (GarbageCollectorMXBean gc : gcBeans) { + gcCount += gc.getCollectionCount(); + gcTimeMs += gc.getCollectionTime(); + } + + String body = String.format( + "{\"sdk_calls\":{\"total\":%d,\"success\":%d,\"error\":%d}," + + "%s,%s," + + "\"jvm\":{\"heap_used_mb\":%d,\"heap_total_mb\":%d,\"heap_max_mb\":%d," + + "\"threads_current\":%d,\"threads_peak\":%d," + + "\"threads_total_started\":%d,\"threads_daemon\":%d," + + "\"gc_count\":%d,\"gc_time_ms\":%d}}", + reqTotal.get(), reqSuccess.get(), reqError.get(), + insertMetrics.toJson("insert"), + detokenizeMetrics.toJson("detokenize"), + usedMb, totalMb, maxMb, + threadsCurrent, threadsPeak, + threadsTotalStarted, threadsDaemon, + gcCount, gcTimeMs); + sendJson(ex, 200, body); + } + } + + // ========================================================================= + // Utilities + // ========================================================================= + + static void sendJson(HttpExchange ex, int code, String body) throws IOException { + byte[] bytes = body.getBytes(StandardCharsets.UTF_8); + ex.getResponseHeaders().set("Content-Type", 
"application/json"); + ex.sendResponseHeaders(code, bytes.length); + try (OutputStream os = ex.getResponseBody()) { os.write(bytes); } + } + + static JSONObject parseBody(HttpExchange ex) { + try { + String raw = readBody(ex.getRequestBody()); + if (raw == null || raw.trim().isEmpty()) return new JSONObject(); + return (JSONObject) new JSONParser().parse(raw); + } catch (Exception e) { + return new JSONObject(); + } + } + + static String readBody(InputStream is) throws IOException { + ByteArrayOutputStream buf = new ByteArrayOutputStream(); + byte[] data = new byte[4096]; int n; + while ((n = is.read(data)) != -1) buf.write(data, 0, n); + return buf.toString("UTF-8"); + } + + static String str(JSONObject obj, String key, String def) { + if (obj == null || !obj.containsKey(key)) return def; + Object v = obj.get(key); + return v != null ? v.toString() : def; + } + + static long longVal(JSONObject obj, String key, long def) { + if (obj == null || !obj.containsKey(key)) return def; + Object v = obj.get(key); + if (v instanceof Number) return ((Number) v).longValue(); + try { return Long.parseLong(v.toString()); } catch (Exception e) { return def; } + } + + static String errorJson(String msg) { + String safe = msg == null ? "unknown error" : msg.replace("\"", "'"); + return "{\"error\":\"" + safe + "\"}"; + } + + /** Minimal toString for v3 response objects (they have their own toString via Gson). */ + static String toJson(Object obj) { + return obj != null ? obj.toString() : "{}"; + } + + static String env(String key, String def) { + String v = System.getenv(key); + return (v != null && !v.isEmpty()) ? 
v : def; + } +} diff --git a/v3/pom.xml b/v3/pom.xml index 372ac376..24634f93 100644 --- a/v3/pom.xml +++ b/v3/pom.xml @@ -11,7 +11,7 @@ skyflow-java - 3.0.0-beta.8 + 3.0.0-beta.9 jar ${project.groupId}:${project.artifactId} Skyflow V3 SDK for the Java programming language diff --git a/v3/src/main/java/com/skyflow/utils/Utils.java b/v3/src/main/java/com/skyflow/utils/Utils.java index 97aa469f..180b01a5 100644 --- a/v3/src/main/java/com/skyflow/utils/Utils.java +++ b/v3/src/main/java/com/skyflow/utils/Utils.java @@ -258,10 +258,11 @@ public static String getEnvVaultURL() throws SkyflowException { if (vaultURL != null && vaultURL.trim().isEmpty()) { LogUtil.printErrorLog(ErrorLogs.EMPTY_VAULT_URL.getLog()); throw new SkyflowException(ErrorCode.INVALID_INPUT.getCode(), ErrorMessage.EmptyVaultUrl.getMessage()); - } else if (vaultURL != null && !isValidURL(vaultURL)) { - LogUtil.printErrorLog(ErrorLogs.INVALID_VAULT_URL_FORMAT.getLog()); - throw new SkyflowException(ErrorCode.INVALID_INPUT.getCode(), ErrorMessage.InvalidVaultUrlFormat.getMessage()); } +// else if (vaultURL != null && !isValidURL(vaultURL)) { +// LogUtil.printErrorLog(ErrorLogs.INVALID_VAULT_URL_FORMAT.getLog()); +// throw new SkyflowException(ErrorCode.INVALID_INPUT.getCode(), ErrorMessage.InvalidVaultUrlFormat.getMessage()); +// } return vaultURL; } catch (DotenvException e) { return null; diff --git a/v3/src/main/java/com/skyflow/utils/validations/Validations.java b/v3/src/main/java/com/skyflow/utils/validations/Validations.java index ace0cec7..b5198dd0 100644 --- a/v3/src/main/java/com/skyflow/utils/validations/Validations.java +++ b/v3/src/main/java/com/skyflow/utils/validations/Validations.java @@ -199,10 +199,11 @@ public static void validateVaultConfiguration(VaultConfig vaultConfig) throws Sk if (vaultURL.trim().isEmpty()) { LogUtil.printErrorLog(ErrorLogs.EMPTY_VAULT_URL.getLog()); throw new SkyflowException(ErrorCode.INVALID_INPUT.getCode(), ErrorMessage.EmptyVaultUrl.getMessage()); - } else 
if (!Utils.isValidURL(vaultURL)) { - LogUtil.printErrorLog(ErrorLogs.INVALID_VAULT_URL_FORMAT.getLog()); - throw new SkyflowException(ErrorCode.INVALID_INPUT.getCode(), ErrorMessage.InvalidVaultUrlFormat.getMessage()); } +// else if (!Utils.isValidURL(vaultURL)) { +// LogUtil.printErrorLog(ErrorLogs.INVALID_VAULT_URL_FORMAT.getLog()); +// throw new SkyflowException(ErrorCode.INVALID_INPUT.getCode(), ErrorMessage.InvalidVaultUrlFormat.getMessage()); +// } } else if (Utils.getEnvVaultURL() == null) { if (clusterId == null) { LogUtil.printErrorLog(ErrorLogs.EITHER_VAULT_URL_OR_CLUSTER_ID_REQUIRED.getLog());