Security update: Bump app version to 1.10.1-fix.1 (CVE-2025-55182) #40

Workflow file for this run

name: CI - External Secrets Operator Tests
permissions:
contents: read
on:
workflow_dispatch:
inputs:
branch:
description: "Branch to test"
required: true
run_all_tests:
description: "Run all test configurations"
type: boolean
default: true
specific_test:
description: "Specific test to run (if not running all)"
required: false
type: choice
options:
- "legacy: using built-in PostgreSQL and built-in Redis"
- "legacy: using external redis and built-in(PostgreSQL)"
- "legacy: using external PostgreSQL and built-in Redis"
- "legacy: using external S3 and built-in PostgreSQL and Redis"
- "legacy: using external Elasticsearch"
- "legacy + OTEL: using built-in (pg, redis)"
- "external-secrets: using built-in (pg, redis)"
- "external-secrets: using external (pg)"
- "external-secrets: using external (s3)"
- "external-secrets: using external (redis)"
- "external-secrets: using external (es)"
- "external-secrets: using external (s3, pg, es, redis)"
- "external-secrets + OTEL: using built-in (pg, redis)"
pull_request:
branches:
- master
paths:
- 'charts/dify/Chart.yaml'
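# Note: the pull_request trigger above only fires for changes to charts/dify/Chart.yaml;
# for ad-hoc runs the workflow_dispatch path can be driven from the GitHub CLI, e.g.
# (a sketch; substitute this file's actual name under .github/workflows/):
#   gh workflow run <workflow-file>.yaml -f branch=master -f run_all_tests=true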
jobs:
# Stage 1: Pre-pull all images and prepare infrastructure
prepare-images:
runs-on: ubuntu-latest
outputs:
cache-hit: ${{ steps.cache-check.outputs.cache-hit }}
image-cache-key: ${{ steps.cache-key.outputs.key }}
steps:
- name: Checkout code
uses: actions/checkout@v4
# Generate unified cache key
- name: Generate cache key
id: cache-key
run: |
CACHE_KEY="${{ runner.os }}-minikube-images-${{ hashFiles('ci/values/*.yaml', 'charts/dify/Chart.yaml', 'charts/dify/Chart.lock') }}"
echo "key=$CACHE_KEY" >> $GITHUB_OUTPUT
echo "Generated cache key: $CACHE_KEY"
# Check image cache with optimized restore strategy
- name: Check Docker images cache
id: cache-check
uses: actions/cache@v4
with:
path: ~/.minikube/cache/images
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-minikube-images-${{ hashFiles('charts/dify/Chart.yaml', 'charts/dify/Chart.lock') }}-
${{ runner.os }}-minikube-images-${{ hashFiles('charts/dify/Chart.yaml') }}-
${{ runner.os }}-minikube-images-
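# The restore-keys above fall back from the exact key (values + Chart.yaml + Chart.lock)
# to progressively broader prefixes, so even a stale image cache is reused on a miss and
# only new or changed images need to be pulled below.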
# Only run if cache miss
- name: Setup minikube for image caching
if: steps.cache-check.outputs.cache-hit != 'true'
uses: medyagh/setup-minikube@latest
with:
cache: true
# Cache all images (unified list for all scenarios)
- name: Cache all Docker images
if: steps.cache-check.outputs.cache-hit != 'true'
run: |
echo "Pre-pulling all Docker images to minikube cache..."
# Extract images from values files using script
chmod +x ci/scripts/extract-images.sh
EXTRACTED_IMAGES=$(ci/scripts/extract-images.sh values-eso.yaml github-actions 2>/dev/null)
# Add additional images for ESO scenarios
ADDITIONAL_IMAGES="hashicorp/vault:1.20.4 bitnamilegacy/minio:2025.7.23-debian-12-r3 cr.weaviate.io/semitechnologies/weaviate:1.27.27 bitnamilegacy/elasticsearch:8.14.3 ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s:0.136.0"
# Combine extracted and additional images
ALL_IMAGES="$EXTRACTED_IMAGES $ADDITIONAL_IMAGES"
echo "ALL_IMAGES=$ALL_IMAGES" >> $GITHUB_ENV
# Display extracted images for verification
echo "Extracted Images from values-eso.yaml:"
ci/scripts/extract-images.sh values-eso.yaml list
echo ""
echo "Additional ESO-specific Images:"
echo "hashicorp/vault:1.20.4"
echo "bitnamilegacy/minio:2025.7.23-debian-12-r3"
echo "cr.weaviate.io/semitechnologies/weaviate:1.27.27"
echo "bitnamilegacy/elasticsearch:8.14.3"
echo "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s:0.136.0"
IMAGE_COUNT=$(echo "$ALL_IMAGES" | wc -w)
echo "Total images to cache: $IMAGE_COUNT"
# Pull images in parallel (limit to 3 concurrent)
echo "$ALL_IMAGES" | tr ' ' '\n' | grep -v '^[[:space:]]*$' | while read -r image; do
{
echo "Pulling $image..."
if minikube image pull "$image" 2>/dev/null; then
echo "Successfully cached $image"
else
echo "Failed to cache $image (might already exist)"
fi
} &
# Limit concurrent pulls
if (( $(jobs -r | wc -l) >= 3 )); then
wait -n
fi
done
# Wait for all remaining jobs
wait
echo "Image caching completed"
# Show cached images
echo "Cached images in minikube:"
minikube image ls --format table | head -20 || true
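# Everything pulled above ends up under ~/.minikube/cache/images, which is the path the
# actions/cache step persists; each matrix job below restores that same path instead of
# pulling from the registries again.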
# Stage 2: Matrix testing with cached images
test-matrix:
needs: prepare-images
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- config_name: "legacy: using built-in PostgreSQL and built-in Redis"
values_file: "values-legacy.yaml"
use_eso: false
use_external_redis: false
use_external_pg: false
use_external_s3: false
use_external_es: false
- config_name: "legacy: using external redis and built-in(PostgreSQL)"
values_file: "values-legacy-external-redis.yaml"
use_eso: false
use_external_redis: true
use_external_pg: false
use_external_s3: false
use_external_es: false
- config_name: "legacy: using external PostgreSQL and built-in Redis"
values_file: "values-legacy-external-pg.yaml"
use_eso: false
use_external_redis: false
use_external_pg: true
use_external_s3: false
use_external_es: false
- config_name: "legacy: using external S3 and built-in PostgreSQL and Redis"
values_file: "values-legacy-external-s3.yaml"
use_eso: false
use_external_redis: false
use_external_pg: false
use_external_s3: true
use_external_es: false
- config_name: "legacy: using external Elasticsearch"
values_file: "values-legacy-external-es.yaml"
use_eso: false
use_external_redis: false
use_external_pg: false
use_external_s3: false
use_external_es: true
- config_name: "legacy + OTEL: using built-in (pg, redis)"
values_file: "values-legacy-otel.yaml"
use_eso: false
use_external_redis: false
use_external_pg: false
use_external_s3: false
use_external_es: false
use_otel_collector: true
- config_name: "external-secrets: using built-in (pg, redis)"
values_file: "values-eso.yaml"
use_eso: true
use_external_redis: false
use_external_pg: false
use_external_s3: false
use_external_es: false
- config_name: "external-secrets: using external (pg)"
values_file: "values-eso-external-pg.yaml"
use_eso: true
use_external_redis: false
use_external_pg: true
use_external_s3: false
use_external_es: false
- config_name: "external-secrets: using external (s3)"
values_file: "values-eso-external-s3.yaml"
use_eso: true
use_external_redis: false
use_external_pg: false
use_external_s3: true
use_external_es: false
- config_name: "external-secrets: using external (redis)"
values_file: "values-eso-external-redis.yaml"
use_eso: true
use_external_redis: true
use_external_pg: false
use_external_s3: false
use_external_es: false
- config_name: "external-secrets: using external (es)"
values_file: "values-eso-external-es.yaml"
use_eso: true
use_external_redis: false
use_external_pg: false
use_external_s3: false
use_external_es: true
- config_name: "external-secrets: using external (s3, pg, es, redis)"
values_file: "values-eso-external-s3-pg-es-redis.yaml"
use_eso: true
use_external_redis: true
use_external_pg: true
use_external_s3: true
use_external_es: true
- config_name: "external-secrets + OTEL: using built-in (pg, redis)"
values_file: "values-eso-otel.yaml"
use_eso: true
use_external_redis: false
use_external_pg: false
use_external_s3: false
use_external_es: false
use_otel_collector: true
name: ${{ matrix.config_name }}
steps:
- name: Checkout code
uses: actions/checkout@v4
# Restore all caches with optimized restore strategies
- name: Restore Docker images cache
uses: actions/cache@v4
with:
path: ~/.minikube/cache/images
key: ${{ needs.prepare-images.outputs.image-cache-key }}
restore-keys: |
${{ runner.os }}-minikube-images-${{ hashFiles('charts/dify/Chart.yaml', 'charts/dify/Chart.lock') }}-
${{ runner.os }}-minikube-images-${{ hashFiles('charts/dify/Chart.yaml') }}-
${{ runner.os }}-minikube-images-
- name: Restore Helm repositories cache
uses: actions/cache@v4
with:
path: ~/.cache/helm/repository
key: ${{ runner.os }}-helm-repos-${{ hashFiles('charts/dify/Chart.yaml') }}
restore-keys: |
${{ runner.os }}-helm-repos-
- name: Restore Helm chart dependencies cache
uses: actions/cache@v4
with:
path: charts/dify/charts
key: ${{ runner.os }}-helm-deps-${{ hashFiles('charts/dify/Chart.lock', 'charts/dify/Chart.yaml') }}
restore-keys: |
${{ runner.os }}-helm-deps-${{ hashFiles('charts/dify/Chart.yaml') }}-
${{ runner.os }}-helm-deps-
# Each matrix job runs in its own VM, so minikube and Helm have to be set up again here
- name: Setup minikube
uses: medyagh/setup-minikube@latest
with:
cache: true
- name: Setup Helm
uses: azure/setup-helm@v4
with:
version: "v3.13.0"
# Add Helm repositories (leveraging cache from prepare-images stage)
- name: Add Helm repositories
run: |
echo "Adding Helm repositories..."
helm repo add external-secrets https://charts.external-secrets.io || true
helm repo add hashicorp https://helm.releases.hashicorp.com || true
helm repo add bitnami https://charts.bitnami.com/bitnami || true
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts || true
helm repo update
- name: Verify cached images
run: |
echo "Verifying cached images for scenario: ${{ matrix.config_name }}"
echo "Images available in minikube:"
minikube image ls --format table | head -20 || true
- name: Install chart dependencies
run: |
cd charts/dify
if [ ! -d "charts" ] || [ ! "$(ls -A charts)" ]; then
echo "Installing Helm chart dependencies..."
helm dependency update
else
echo "Helm chart dependencies already cached"
fi
- name: Install External Secrets Operator
if: matrix.use_eso
run: |
echo "Installing External Secrets Operator..."
helm install external-secrets external-secrets/external-secrets \
--version 0.20.1 \
--namespace external-secrets-system \
--create-namespace \
--wait \
--timeout 120s
- name: Install Vault
if: matrix.use_eso
run: |
echo "Installing Vault..."
helm install vault hashicorp/vault \
--version 0.31.0 \
--set "server.dev.enabled=true" \
--set "server.dev.devRootToken=dev-only-token" \
--wait \
--timeout 120s
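# Note: server.dev.enabled runs Vault unsealed, in-memory, and pre-authenticated with the
# fixed root token above; that is only acceptable for this throwaway CI cluster.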
- name: Wait for External Secrets and Vault to be ready
if: matrix.use_eso
run: |
echo "Waiting for External Secrets Operator components..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=external-secrets -n external-secrets-system --timeout=120s
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=external-secrets-webhook -n external-secrets-system --timeout=120s
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=external-secrets-cert-controller -n external-secrets-system --timeout=120s
echo "Waiting for Vault to be ready..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=vault --timeout=120s
echo "All components are ready!"
- name: Setup and validate Vault secrets
if: matrix.use_eso
run: |
chmod +x ci/scripts/setup-and-validate-vault.sh
ci/scripts/setup-and-validate-vault.sh
- name: Create test ClusterSecretStore
if: matrix.use_eso
run: |
chmod +x ci/scripts/create-test-clustersecretstore.sh
ci/scripts/create-test-clustersecretstore.sh
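# The actual manifest is created by ci/scripts/create-test-clustersecretstore.sh; for
# illustration only, a minimal ClusterSecretStore pointing ESO at the dev-mode Vault
# installed above might look like this (the names, namespace, and token Secret here are
# assumptions, not necessarily what the script uses):
#
#   apiVersion: external-secrets.io/v1beta1
#   kind: ClusterSecretStore
#   metadata:
#     name: vault-backend
#   spec:
#     provider:
#       vault:
#         server: "http://vault.default.svc.cluster.local:8200"
#         path: "secret"
#         version: "v2"
#         auth:
#           tokenSecretRef:
#             name: vault-token
#             key: token
#             namespace: default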
- name: Install external PostgreSQL
if: matrix.use_external_pg
run: |
echo "Installing external PostgreSQL..."
helm install external-postgres bitnami/postgresql \
--version 12.5.6 \
--set image.repository="bitnamilegacy/postgresql" \
--set auth.postgresPassword="difyai123456" \
--set auth.database="dify" \
--set primary.service.type="ClusterIP" \
--set primary.service.ports.postgresql=5432 \
--wait \
--timeout 300s
- name: Setup external PostgreSQL DNS
if: matrix.use_external_pg
run: |
chmod +x ci/scripts/setup-external-postgres-dns.sh
ci/scripts/setup-external-postgres-dns.sh
- name: Install MinIO
if: matrix.use_external_s3
run: |
echo "Installing MinIO for S3 testing..."
helm install minio bitnami/minio \
--version 17.0.21 \
--set image.repository="bitnamilegacy/minio" \
--set auth.rootUser="minio-root" \
--set auth.rootPassword="minio123456" \
--set defaultBuckets="difyai" \
--set service.type="ClusterIP" \
--set service.ports.api=9000 \
--set console.enabled=false \
--wait \
--timeout 300s
- name: Install Redis
if: matrix.use_external_redis
run: |
echo "Installing Redis instance"
helm install test-redis bitnami/redis \
--version 16.13.2 \
--set image.repository="bitnamilegacy/redis" \
--set auth.password="difyai123456" \
--set master.persistence.enabled=false \
--set replica.replicaCount=0 \
--set fullnameOverride="redis" \
--wait \
--timeout 300s
- name: Install Elasticsearch
if: matrix.use_external_es
run: |
echo "Installing Elasticsearch for vector database testing..."
helm install external-elasticsearch bitnami/elasticsearch \
--version 21.3.6 \
--set image.repository="bitnamilegacy/elasticsearch" \
--set auth.elasticPassword="elasticsearch123456" \
--set master.replicaCount=1 \
--set data.replicaCount=0 \
--set coordinating.replicaCount=0 \
--set ingest.replicaCount=0 \
--set master.persistence.enabled=false \
--set service.type="ClusterIP" \
--set service.ports.restAPI=9200 \
--set sysctlImage.enabled=false \
--wait \
--timeout 300s
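# The Elasticsearch install above is deliberately trimmed for CI: a single master node,
# no dedicated data/coordinating/ingest replicas, and no persistence, which keeps the
# footprint small enough for the GitHub-hosted runner.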
- name: Install OTEL Collector
if: matrix.use_otel_collector
run: |
echo "Installing minimal OpenTelemetry Collector for testing..."
# Install minimal OTEL Collector using official configuration
helm install otel-collector open-telemetry/opentelemetry-collector \
--version 0.136.1 \
--set mode=deployment \
--set image.repository="ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s" \
--set command.name="otelcol-k8s" \
--set fullnameOverride=otel-collector \
--set resources.limits.cpu=100m \
--set resources.limits.memory=128Mi \
--set resources.requests.cpu=50m \
--set resources.requests.memory=64Mi \
--set config.service.telemetry.metrics.address="0.0.0.0:8888" \
--set config.exporters.debug.verbosity=basic \
--set config.service.pipelines.logs=null \
--set config.service.pipelines.metrics.receivers="{otlp,prometheus}" \
--set config.service.pipelines.metrics.processors="{memory_limiter,batch}" \
--set config.service.pipelines.metrics.exporters="{debug}" \
--set ports.metrics.enabled=true \
--set ports.metrics.containerPort=8888 \
--set ports.metrics.servicePort=8888 \
--set ports.metrics.protocol=TCP \
--wait \
--timeout=300s
echo "Minimal OTEL Collector installed successfully!"
- name: Test Helm template rendering
run: |
chmod +x ci/scripts/test-helm-template.sh
ci/scripts/test-helm-template.sh "${{ matrix.values_file }}" "${{ matrix.config_name }}"
- name: Deploy and test
env:
HELM_TIMEOUT: "700s"
POD_READY_TIMEOUT: "200s"
USE_OTEL_COLLECTOR: ${{ matrix.use_otel_collector || 'false' }}
run: |
chmod +x ci/scripts/deploy-and-test.sh
ci/scripts/deploy-and-test.sh "${{ matrix.values_file }}"
- name: Cache cleanup on failure
if: failure()
run: |
echo "Cleaning up caches due to job failure..."
rm -rf charts/dify/charts
# Summary job
test-summary:
runs-on: ubuntu-latest
name: Test Summary
needs: test-matrix
if: always()
steps:
- name: Check test results
run: |
echo "Test Results Summary"
echo "===================="
if [[ "${{ needs.test-matrix.result }}" != "success" ]]; then
echo "One or more test configurations failed"
echo "Please check the individual job logs for details"
exit 1
else
echo "All test configurations passed successfully!"
fi
echo ""
echo "Tested Configurations:"
echo "- ESO + Built-in PostgreSQL"
echo "- ESO + External PostgreSQL"
echo "- ESO + External S3 bucket"
echo "- ESO + External S3 + External PostgreSQL"
echo "- Legacy + Built-in PostgreSQL"
echo "- Legacy + External PostgreSQL"
echo "- Legacy + External S3"
echo ""
echo "CI Pipeline completed successfully!"