diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index a8bdca88..15196ba6 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -59,6 +59,14 @@
 ## Documentation Preferences

+### Writing Style
+- **Terse and practical**: Every sentence must convey actionable or necessary information. Remove filler, marketing language, and motivational text.
+- **No fluff sections**: Do not include "What is X?", "Why use X?", "Benefits of X", "Key Features", or "Next Steps" sections. Readers already chose the tool — tell them how to use it.
+- **No troubleshooting sections**: Do not add generic troubleshooting or "Common Issues" sections to docs pages.
+- **Minimal repetition**: State a fact once. Cross-reference instead of restating. If two pages cover overlapping topics, consolidate into one and link.
+- **Lead with commands, config, and examples**: Tables, code blocks, CLI commands, and configuration snippets are preferred over prose explanations.
+- **Short introductions**: A page introduction should be one or two sentences stating what the page covers, then immediately begin the content.
+
 ### Diagrams and Visual Documentation
 - **Always use Mermaid diagrams** instead of ASCII art for workflow diagrams, architecture diagrams, and flowcharts
 - Use appropriate Mermaid diagram types:
@@ -67,10 +75,9 @@
   - `sequenceDiagram` for API interactions
   - `gitgraph` for branch/release strategies
 - Include styling with `classDef` for better visual hierarchy
-- Add descriptive comments and emojis sparingly for clarity

 ### Documentation Standards
-- Keep documentation DRY (Don't Repeat Yourself) - reference other docs instead of duplicating
+- Keep documentation DRY — reference other docs instead of duplicating content
 - Use clear cross-references between related documentation files
 - Update the main architecture document when workflow structure changes
diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml
new file mode 100644
index 00000000..1f2b7374
--- /dev/null
+++ b/.github/workflows/build-images.yml
@@ -0,0 +1,76 @@
+name: Build and Push Images
+
+on:
+  push:
+    branches:
+      - '**'
+    tags:
+      - '**'
+    paths:
+      - 'images/**'
+  schedule:
+    # Run weekly on Sunday at 11:00 PM UTC (Sunday-Monday night depending on timezone)
+    - cron: '0 23 * * 0'
+  workflow_dispatch:
+
+env:
+  REGISTRY: ghcr.io
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Docker Meta (Base)
+        id: meta-base
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ github.repository }}/base
+          bake-target: base
+          tags: |
+            type=sha
+            type=ref,event=branch
+            type=ref,event=tag
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
+
+      - name: Docker Meta (NodeJS)
+        id: meta-nodejs
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ github.repository }}/nodejs
+          bake-target: nodejs
+          tags: |
+            type=sha
+            type=ref,event=branch
+            type=ref,event=tag
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
+
+      - name: Build and push
+        uses: docker/bake-action@v5
+        with:
+          workdir: ./images
+          push: true
+          files: |
+            ./docker-bake.hcl
+            ${{ steps.meta-base.outputs.bake-file }}
+            ${{ steps.meta-nodejs.outputs.bake-file }}
+          set: |
+            base.cache-from=type=gha,scope=base-${{ github.ref_name }}
+            base.cache-to=type=gha,mode=max,scope=base-${{ github.ref_name }}
+            nodejs.cache-from=type=gha,scope=nodejs-${{ github.ref_name }}
+            nodejs.cache-to=type=gha,mode=max,scope=nodejs-${{ github.ref_name }}
diff --git a/.github/workflows/build-templates.yml b/.github/workflows/build-templates.yml
deleted file mode 100644
index 80f76f06..00000000
--- a/.github/workflows/build-templates.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-name: Build and Upload Templates
-on:
-  push:
-    paths:
-      - 'packer/**'
-  schedule:
-    - cron: "0 4 * * *"
-  workflow_dispatch:
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    env:
-      PROXMOX_API_URL: ${{ secrets.PROXMOX_API_URL }}
-      PROXMOX_TOKEN_ID: ${{ secrets.PROXMOX_TOKEN_ID }}
-      PROXMOX_TOKEN_SECRET: ${{ secrets.PROXMOX_TOKEN_SECRET }}
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Set TEMPLATE_VERSION
-        run: echo "TEMPLATE_VERSION=$(date +%Y%m%d)" >> $GITHUB_ENV
-
-      - name: Install dependencies
-        run: sudo apt-get update && sudo apt-get install -y packer ansible zstd python3-requests
-
-      - name: Build Debian 12 Template
-        run: |
-          packer build \
-            -var "template_version=${TEMPLATE_VERSION}" \
-            debian12.pkr.hcl
-
-      - name: Upload Debian 12 Template
-        run: |
-          python3 api/proxmox_upload.py \
-            --file /tmp/output/debian12-fungible_${TEMPLATE_VERSION}.tar.xz
-
-      - name: Build Rocky 9 Template
-        run: |
-          packer build \
-            -var "template_version=${TEMPLATE_VERSION}" \
-            rocky9.pkr.hcl
-
-      - name: Upload Rocky 9 Template
-        run: |
-          python3 api/proxmox_upload.py \
-            --file /tmp/output/rocky9-fungible_${TEMPLATE_VERSION}.tar.xz
\ No newline at end of file
diff --git a/ci-cd-automation/README.md b/ci-cd-automation/README.md
deleted file mode 100644
index b8b97510..00000000
--- a/ci-cd-automation/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# CI/CD Automation
\ No newline at end of file
diff --git a/ci-cd-automation/check-container-exists.sh b/ci-cd-automation/check-container-exists.sh
deleted file mode 100644
index ba0b2c9a..00000000
--- a/ci-cd-automation/check-container-exists.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-# Script to check if a container exists, and if so, whether it needs to be updated or cloned.
-# Last Modified by Maxwell Klema on July 13th, 2025
-# -----------------------------------------------------
-
-outputError() {
-    echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
-    echo -e "${BOLD}${MAGENTA}❌ Script Failed. Exiting... ${RESET}"
-    echo -e "$2"
-    echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
-    exit $1
-}
-
-RESET="\033[0m"
-BOLD="\033[1m"
-MAGENTA='\033[35m'
-
-echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
-echo -e "${BOLD}${MAGENTA}🔎 Check Container Exists ${RESET}"
-echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
-
-TYPE_RUNNER="true"
-source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh
-source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh
-
-STATUS=$?
-if [ "$STATUS" != 0 ]; then
-    exit 1;
-fi
-
-REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY")
-
-# Check if repository folder is present.
-if [ "$PVE1" == "true" ]; then
-    if [ ! -z "$CONTAINER_ID" ] && pct exec $CONTAINER_ID -- test -f /root/container-updates.log; then
-        exit 2; # Update Repository
-    else
-        exit 0; # Clone Repository
-    fi
-else
-    if [ !
-z "$CONTAINER_ID" ] && ssh 10.15.0.5 "pct exec $CONTAINER_ID -- test -f /root/container-updates.log" ; then - exit 2; # Update Repository - else - exit 0; # Clone Repository - fi -fi \ No newline at end of file diff --git a/ci-cd-automation/delete-container.sh b/ci-cd-automation/delete-container.sh deleted file mode 100644 index d46ce23b..00000000 --- a/ci-cd-automation/delete-container.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Script to delete a container permanently -# Last Modified by Maxwell Klema on July 13th, 2025 -# ----------------------------------------------------- - -RESET="\033[0m" -BOLD="\033[1m" -MAGENTA='\033[35m' - -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}${MAGENTA}🗑️ Delete Container ${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -CMD=( -bash /var/lib/vz/snippets/helper-scripts/delete-runner.sh -"$PROJECT_REPOSITORY" -"$GITHUB_PAT" -"$PROXMOX_USERNAME" -"$PROXMOX_PASSWORD" -"$CONTAINER_NAME" -) - -# Safely quote each argument for the shell -QUOTED_CMD=$(printf ' %q' "${CMD[@]}") - -tmux new-session -d -s delete-runner "$QUOTED_CMD" - -echo "✅ Container with name \"$CONTAINER_NAME\" will be permanently deleted." -exit 0 # Container Deleted Successfully \ No newline at end of file diff --git a/ci-cd-automation/helper-scripts/PVE_user_authentication.sh b/ci-cd-automation/helper-scripts/PVE_user_authentication.sh deleted file mode 100755 index f2f07204..00000000 --- a/ci-cd-automation/helper-scripts/PVE_user_authentication.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Script that checks if a user is authenticated in Proxmox PVE Realm @ opensource.mieweb.org -# Last Modified by Maxwell Klema on July 13th, 2025 -# ----------------------------------------------------- - -# Authenticate User (Only Valid Users can Create Containers) - -if [ -z "$PROXMOX_USERNAME" ]; then - read -p "Enter Proxmox Username → " PROXMOX_USERNAME -fi - -if [ -z "$PROXMOX_PASSWORD" ]; then - read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD - echo "" -fi - -USER_AUTHENTICATED=$(ssh root@create-container "node /root/bin/js/runner.js authenticateUser \"$PROXMOX_USERNAME\" \"$PROXMOX_PASSWORD\"") - -if [ $USER_AUTHENTICATED == 'false' ]; then - outputError 1 "Your Proxmox account, $PROXMOX_USERNAME@pve, was not authenticated. Retry with valid credentials." -fi - -echo "🎉 Your proxmox account, $PROXMOX_USERNAME@pve, has been authenticated" \ No newline at end of file diff --git a/ci-cd-automation/helper-scripts/create-template.sh b/ci-cd-automation/helper-scripts/create-template.sh deleted file mode 100755 index 54d5e1ea..00000000 --- a/ci-cd-automation/helper-scripts/create-template.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash -# Creates a template of a LXC container -# Last modified by Maxwell Klema on July 23rd, 2025. -# -------------------------------------------------- - -if [ "${DEPLOY_ON_START^^}" != "Y" ] || [ "${GH_ACTION^^}" != "Y" ]; then - return 0 -fi - -DEFAULT_BRANCH=$(curl -s https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME | jq -r '.default_branch') - -if [ "$DEFAULT_BRANCH" != "$PROJECT_BRANCH" ]; then - return 0 -fi - -echo "📝 Creating Container Template..." - -# Check if template already exists, and if it does, destroy it ===== - -TEMPLATE_NAME="template-$REPO_BASE_NAME-$REPO_BASE_NAME_WITH_OWNER" -TEMPLATE_CONTAINER_ID=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$TEMPLATE_NAME" '$3 == name {print $1}') - -if [ ! 
-z "$TEMPLATE_CONTAINER_ID" ]; then - pct destroy $TEMPLATE_CONTAINER_ID | true -fi - -# Clone LXC container and convert it into a template ===== - -NEXT_ID=$(pvesh get /cluster/nextid) - -if (( $CONTAINER_ID % 2 == 101 )); then - ssh root@10.15.0.5 " - pct clone $CONTAINER_ID $NEXT_ID \ - --hostname "$TEMPLATE_NAME" \ - --full true - pct migrate $NEXT_ID intern-phxdc-pve1 --target-storage containers-pve1 - " > /dev/null 2>&1 -else - pct clone $CONTAINER_ID $NEXT_ID \ - --hostname "$TEMPLATE_NAME" \ - --full true -fi - -# AUTH_TOKEN_RESPONSE=$(curl --location --request POST https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/registration-token --header "Authorization: token $GITHUB_PAT") -# TOKEN=$(echo "$AUTH_TOKEN_RESPONSE" | jq -r '.token') - -# Remove rsa keys ==== -pct start $NEXT_ID -pct enter $NEXT_ID < /dev/null 2>&1 - else - ssh root@10.15.0.5 "pct destroy $CONTAINER_ID" > /dev/null 2>&1 - fi -else - if pct status "$CONTAINER_ID" | grep -q "status: running"; then - pct stop "$CONTAINER_ID" && pct destroy "$CONTAINER_ID" > /dev/null 2>&1 - else - pct destroy "$CONTAINER_ID" > /dev/null 2>&1 - fi -fi - -source /usr/local/bin/prune_iptables.sh - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") -REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) - -RUNNERS=$(curl --location https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners --header "Authorization: token $GITHUB_PAT") - -while read -r RUNNER; do - RUNNER_NAME=$(echo "$RUNNER" | jq -r '.name') - if [ "$RUNNER_NAME" == "$CONTAINER_NAME" ]; then - RUNNER_ID=$(echo "$RUNNER" | jq -r '.id') - curl --location --request DELETE "https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/$RUNNER_ID" \ - --header "Authorization: token $GITHUB_PAT" - fi -done < <(echo "$RUNNERS" | jq -c '.runners[]') \ No newline at end of file diff --git a/ci-cd-automation/helper-scripts/repository_status.sh b/ci-cd-automation/helper-scripts/repository_status.sh deleted file mode 100755 index 5ec73b76..00000000 --- a/ci-cd-automation/helper-scripts/repository_status.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Helper script to determine if container needs to clone repository or simply update it -# Last Modified by Maxwell Klema on July 21st, 2025 -# ------------------------------------------------- - -set +e -TYPE_RUNNER="true" -source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh -source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh - -STATUS=$? - -if [ "$STATUS" != 0 ]; then - exit 1; -fi - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") - -# Check if repository folder is present. 
- -if [ "$PVE1" == "true" ]; then - if pct exec $CONTAINER_ID -- test -d /root/$REPO_BASE_NAME; then - echo "Update" - exit 2; # Update Repository - else - echo "Clone" - exit 0; # Clone Repository - fi -else - if ssh 10.15.0.5 "pct exec $CONTAINER_ID -- test -d /root/$REPO_BASE_NAME"; then - echo "Update" - exit 2; # Update Repository - else - echo "Clone" - exit 0; # Clone Repository - fi -fi \ No newline at end of file diff --git a/ci-cd-automation/helper-scripts/verify_container_ownership.sh b/ci-cd-automation/helper-scripts/verify_container_ownership.sh deleted file mode 100755 index f99849ae..00000000 --- a/ci-cd-automation/helper-scripts/verify_container_ownership.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -# Script to verify container ownership based on name and CTID -# Last Modified by Maxwell Klema on August 5th, 2025 -# ----------------------------------------------------- - -CONTAINER_NAME="${CONTAINER_NAME,,}" -CONTAINER_ID=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$CONTAINER_NAME" '$3 == name {print $1}') - -if [ -z "$CONTAINER_ID" ]; then - echo "✅ Container with name \"$CONTAINER_NAME\" is available for use." - return 1 -fi - -CONTAINER_OWNERSHIP=$(ssh root@10.15.20.69 -- "jq '.\"$CONTAINER_NAME\".user' /etc/nginx/port_map.json") -if [ "$TYPE_RUNNER" == "true" ] && (( $CONTAINER_ID % 2 == 0 )); then - PVE1="false" -elif [ "$TYPE_RUNNER" == "true" ] && (( $CONTAINER_ID % 2 != 0 )); then - PVE1="true" -fi - -if [ "$CONTAINER_OWNERSHIP" == "null" ]; then - echo "❌ You do not own the container with name \"$CONTAINER_NAME\"." - outputError 1 "You do not own the container with name \"$CONTAINER_NAME\"." -fi \ No newline at end of file diff --git a/ci-cd-automation/setup-runner.sh b/ci-cd-automation/setup-runner.sh deleted file mode 100644 index aaf9fafe..00000000 --- a/ci-cd-automation/setup-runner.sh +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash -# A script for cloning a Distro template, installing, and starting a runner on it. -# Last Modified by Maxwell Klema on August 5th, 2025 -# ------------------------------------------------ - -outputError() { - echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - echo -e "${BOLD}${MAGENTA}❌ Script Failed. Exiting... ${RESET}" - echo -e "$2" - echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - exit $1 -} - -BOLD='\033[1m' -RESET='\033[0m' - -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo "🧬 Cloning a Template and installing a Runner" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -# Validating Container Name ===== - -source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh #Authenticate User -source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh #Ensure container does not exist. - -if [ ! -z "$CONTAINER_OWNERSHIP" ]; then - outputError 1 "You already own a container with name \"$CONTAINER_NAME\". Please delete it before creating a new one." 
-fi - -# Cloning Container Template and Setting it up ===== - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") -REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) - -TEMPLATE_NAME="template-$REPO_BASE_NAME-$REPO_BASE_NAME_WITH_OWNER" -CTID_TEMPLATE=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$TEMPLATE_NAME" '$3 == name {print $1}') - -case "${LINUX_DISTRIBUTION^^}" in - "") PACKAGE_MANAGER="apt-get" ;; - ROCKY) PACKAGE_MANAGER="dnf" ;; -esac - -# If no template ID was provided, assign a default based on distro - -if [ -z "$CTID_TEMPLATE" ]; then - case "${LINUX_DISTRIBUTION^^}" in - "") CTID_TEMPLATE="160" ;; - ROCKY) CTID_TEMPLATE="138" ;; - esac -fi - -if [ "${LINUX_DISTRIBUTION^^}" != "ROCKY" ]; then - LINUX_DISTRIBUTION="DEBIAN" -fi - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") -REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) - -NEXT_ID=$(pvesh get /cluster/nextid) #Get the next available LXC ID - -# Create the Container Clone -echo "⏳ Cloning Container..." -pct clone $CTID_TEMPLATE $NEXT_ID \ - --hostname $CONTAINER_NAME \ - --full true > /dev/null 2>&1 - -# Set Container Options -echo "⏳ Setting Container Properties..." -pct set $NEXT_ID \ - --tags "$PROXMOX_USERNAME" \ - --tags "$LINUX_DISTRIBUTION" \ - --onboot 1 \ - --cores 4 \ - --memory 4096 > /dev/null 2>&1 - -pct start $NEXT_ID > /dev/null 2>&1 -pveum aclmod /vms/$NEXT_ID --user "$PROXMOX_USERNAME@pve" --role PVEVMUser > /dev/null 2>&1 - -sleep 5 -echo "⏳ DHCP Allocating IP Address..." -CONTAINER_IP=$(pct exec $NEXT_ID -- hostname -I | awk '{print $1}') - -# Setting Up Github Runner ===== - -# Get Temporary Token -echo "🪙 Getting Authentication Token..." -AUTH_TOKEN_RESPONSE=$(curl --location --request POST https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/registration-token --header "Authorization: token $GITHUB_PAT" --write-out "HTTPSTATUS:%{http_code}" --silent) - -HTTP_STATUS=$(echo "$AUTH_TOKEN_RESPONSE" | grep -o "HTTPSTATUS:[0-9]*" | cut -d: -f2) -AUTH_TOKEN_BODY=$(echo "$AUTH_TOKEN_RESPONSE" | sed 's/HTTPSTATUS:[0-9]*$//') - -if [ "$HTTP_STATUS" != "201" ]; then - outputError 1 "Failed to get GitHub authentication token. HTTP Status: $HTTP_STATUS\nResponse: $AUTH_TOKEN_BODY" -fi - -TOKEN=$(echo "$AUTH_TOKEN_BODY" | jq -r '.token') - -pct enter $NEXT_ID < /dev/null 2>&1 -rm -rf /root/container-updates.log || true && \ -cd /actions-runner && export RUNNER_ALLOW_RUNASROOT=1 && \ -runProcess=\$(ps aux | grep "[r]un.sh" | awk '{print \$2}' | head -n 1) && \ -if [ ! -z "\$runProcess" ]; then kill -9 \$runProcess || true; fi && \ -rm -rf .runner .credentials && rm -rf _work/* /var/log/runner/* 2>/dev/null || true && \ -export RUNNER_ALLOW_RUNASROOT=1 && \ -./config.sh --url $PROJECT_REPOSITORY --token $TOKEN --labels $CONTAINER_NAME --name $CONTAINER_NAME --unattended -EOF - -# Generate RSA Keys ===== - -echo "🔑 Generating RSA Key Pair..." -pct exec $NEXT_ID -- bash -c "ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa -q" -PUB_KEY=$(pct exec $NEXT_ID -- bash -c "cat /root/.ssh/id_rsa.pub") - -# Place public key in all necessary authorized_keys files -echo "$PUB_KEY" >> /home/create-container/.ssh/authorized_keys -echo "$PUB_KEY" >> /home/update-container/.ssh/authorized_keys -echo "$PUB_KEY" >> /home/delete-container/.ssh/authorized_keys -echo "$PUB_KEY" >> /home/container-exists/.ssh/authorized_keys - -echo "🔑 Creating Service File..." 
-pct exec $NEXT_ID -- bash -c "cat < /etc/systemd/system/github-runner.service -[Unit] -Description=GitHub Actions Runner -After=network.target - -[Service] -Type=simple -WorkingDirectory=/actions-runner -Environment=\"RUNNER_ALLOW_RUNASROOT=1\" -ExecStart=/actions-runner/run.sh -Restart=always - -[Install] -WantedBy=multi-user.target -EOF" - -pct exec $NEXT_ID -- systemctl daemon-reload -pct exec $NEXT_ID -- systemctl enable github-runner -pct exec $NEXT_ID -- systemctl start github-runner - -exit 3 diff --git a/ci-cd-automation/update-container.sh b/ci-cd-automation/update-container.sh deleted file mode 100644 index 0be5e739..00000000 --- a/ci-cd-automation/update-container.sh +++ /dev/null @@ -1,324 +0,0 @@ -#!/bin/bash -# Script to automatically fetch new contents from a branch, push them to container, and restart intern -# Last Modified on August 17th, 2025 by Maxwell Klema -# ---------------------------------------- - -RESET="\033[0m" -BOLD="\033[1m" -MAGENTA='\033[35m' - -outputError() { - echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - echo -e "${BOLD}${MAGENTA}❌ Script Failed. Exiting... ${RESET}" - echo -e "$2" - echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - exit $1 -} - - -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}${MAGENTA}🔄 Update Container Contents ${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -if [ -z "${RUNTIME_LANGUAGE^^}" ]; then - echo "Skipping container update because there is nothing to update." - exit 0 -fi - -source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh -source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh - -# Get Project Details - -CONTAINER_NAME="${CONTAINER_NAME,,}" - -if [ -z "$PROJECT_REPOSITORY" ]; then - read -p "🚀 Paste the link to your project repository → " PROJECT_REPOSITORY -else - DEPLOY_ON_START="y" -fi - -CheckRepository() { - PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY#*github.com/} - PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY_SHORTENED%.git} - REPOSITORY_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://github.com/$PROJECT_REPOSITORY_SHORTENED) -} - -CheckRepository - -if [ "$REPOSITORY_EXISTS" != "200" ]; then - outputError 1 "The repository link you provided, \"$PROJECT_REPOSITORY\" was not valid." -fi - -echo "✅ The repository link you provided, \"$PROJECT_REPOSITORY\", was valid." - -# Get Project Branch - -if [ -z "$PROJECT_BRANCH" ]; then - PROJECT_BRANCH="main" -fi - -REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://api.github.com/repos/$PROJECT_REPOSITORY_SHORTENED/branches/$PROJECT_BRANCH) - -if [ "$REPOSITORY_BRANCH_EXISTS" != "200" ]; then - outputError 1 "The branch you provided, \"$PROJECT_BRANCH\", does not exist on repository at \"$PROJECT_REPOSITORY\"." -fi - - -# Get Project Root Directroy - -if [ "$PROJECT_ROOT" == "." ] || [ -z "$PROJECT_ROOT" ]; then - PROJECT_ROOT="/" -fi - -VALID_PROJECT_ROOT=$(ssh root@10.15.234.122 "node /root/bin/js/runner.js authenticateRepo \"$PROJECT_REPOSITORY\" \"$PROJECT_BRANCH\" \"$PROJECT_ROOT\"") - -if [ "$VALID_PROJECT_ROOT" == "false" ]; then - outputError 1 "The root directory you provided, \"$PROJECT_ROOT\", does not exist on branch, \"$PROJECT_BRANCH\", on repository at \"$PROJECT_REPOSITORY\"." 
-fi - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") -REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) - -if [ "$PROJECT_ROOT" == "" ] || [ "$PROJECT_ROOT" == "/" ]; then - PROJECT_ROOT="." -fi - -# Install Services ==== - -echo "🛎️ Installing Services..." - -if [ -z "$LINUX_DISTRIBUTION" ]; then - LINUX_DISTRIBUTION="debian" -fi - -if [ ! -z "$SERVICES" ] || [ ! -z "$CUSTOM_SERVICES" ]; then - REQUIRE_SERVICES="y" -fi - -SERVICE_COMMANDS=$(ssh -o SendEnv="LINUX_DISTRIBUTION SERVICES CUSTOM_SERVICES REQUIRE_SERVICES" \ - root@10.15.234.122 \ - "/root/bin/deployment-scripts/gatherServices.sh true") - -echo "$SERVICE_COMMANDS" | while read -r line; do - pct exec $CONTAINER_ID -- bash -c "$line | true" > /dev/null 2>&1 -done - -# Change HTTP port if necessary ==== - -if [ ! -z "$HTTP_PORT" ]; then - if [ "$HTTP_PORT" -lt 80 ] || [ "$HTTP_PORT" -gt 60000 ]; then - outputError 1 "Invalid HTTP port: $HTTP_PORT. Must be between 80 and 60000." - fi - ssh root@10.15.20.69 -- \ -"jq \ '.[\"$CONTAINER_NAME\"].ports.http = $HTTP_PORT' \ - /etc/nginx/port_map.json > /tmp/port_map.json.new \ - && mv -f /tmp/port_map.json.new /etc/nginx/port_map.json " -fi - -# Clone repository if needed ==== - -if (( "$CONTAINER_ID" % 2 == 0 )); then - ssh root@10.15.0.5 " - pct enter $CONTAINER_ID < /dev/null -fi -EOF - " -else - pct enter $CONTAINER_ID < /dev/null -fi -EOF -fi - -# Update Environment Variables - -if [ ! -z "$RUNTIME_LANGUAGE" ] && echo "$RUNTIME_LANGUAGE" | jq . >/dev/null 2>&1; then # If RUNTIME_LANGUAGE is set and is valid JSON - MULTI_COMPONENT="Y" -fi - -# Helper Function to write environment variables to a file inside container -writeEnvToFile() { - env_file_path="$1" - component_path="$2" - env_vars=$(cat "$env_file_path") - if (( $CONTAINER_ID % 2 == 0 )); then - ssh 10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'if [ ! -f \"$component_path/.env\" ]; then touch \"$component_path/.env\"; fi; echo \"$env_vars\" >> \"$component_path/.env\"'" - else - pct exec $CONTAINER_ID -- bash -c "if [ ! -f \"$component_path/.env\" ]; then touch \"$component_path/.env\"; fi; echo \"$env_vars\" >> \"$component_path/.env\"" - fi -} - -# Check If there are environment variables -if [ ! -z "$CONTAINER_ENV_VARS" ]; then - # generate random temp .env folder to store all env files for different components - RANDOM_NUM=$(shuf -i 100000-999999 -n 1) - ENV_FOLDER="env_$RANDOM_NUM" - ENV_FOLDER_PATH="/var/lib/vz/snippets/container-env-vars/$ENV_FOLDER" - mkdir -p "$ENV_FOLDER_PATH" - - if [ "${MULTI_COMPONENT^^}" == "Y" ]; then # Multi-Component - if echo "$CONTAINER_ENV_VARS" | jq -e > /dev/null 2>&1; then #if exit status of jq is 0 (valid JSON) // success - for key in $(echo "$CONTAINER_ENV_VARS" | jq -r 'keys[]'); do - COMPONENT_PATH="/root/$REPO_BASE_NAME/$PROJECT_ROOT/$key" - ENV_FILE_NAME=$(echo "$COMPONENT_PATH" | tr '/' '_') - ENV_FILE_NAME="$ENV_FILE_NAME.txt" - ENV_FILE_PATH="$ENV_FOLDER_PATH/$ENV_FILE_NAME" - touch "$ENV_FILE_PATH" - echo "$CONTAINER_ENV_VARS" | jq -r --arg key "$key" '.[$key] | to_entries[] | "\(.key)=\(.value)"' > "$ENV_FILE_PATH" - writeEnvToFile "$ENV_FILE_PATH" "$COMPONENT_PATH" - done - else - outputError "Your \"CONTAINER_ENV_VARS\" is not valid JSON. Please re-format and try again." 
- writeLog "Invalid JSON in CONTAINER_ENV_VARS (GH_ACTION mode)" - exit 16 - fi - else # Single Component - ENV_FILE="env_$RANDOM_NUM.txt" - ENV_FILE_PATH="$ENV_FOLDER_PATH/$ENV_FILE" - touch "$ENV_FILE_PATH" - if echo "$CONTAINER_ENV_VARS" | jq -e > /dev/null 2>&1; then #if exit status of jq is 0 (valid JSON) // success - COMPONENT_PATH="/root/$REPO_BASE_NAME/$PROJECT_ROOT" - echo "$CONTAINER_ENV_VARS " | jq -r 'to_entries[] | "\(.key)=\(.value)"' > "$ENV_FILE_PATH" #k=v pairs - writeEnvToFile "$ENV_FILE_PATH" "$COMPONENT_PATH" - else - outputError "Your \"CONTAINER_ENV_VARS\" is not valid JSON. Please re-format and try again." - writeLog "Invalid JSON in CONTAINER_ENV_VARS for single component (GH_ACTION mode)" - exit 16 - fi - fi -fi - -# Update Container with New Contents from repository ===== - -startComponentPVE1() { - - RUNTIME="$1" - BUILD_CMD="$2" - START_CMD="$3" - COMP_DIR="$4" - INSTALL_CMD="$5" - - if [ "${RUNTIME^^}" == "NODEJS" ]; then - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull" > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $INSTALL_CMD && $BUILD_CMD" > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - elif [ "${RUNTIME^^}" == "PYTHON" ]; then - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull" > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $INSTALL_CMD && $BUILD_CMD" > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - fi -} - -startComponentPVE2() { - - RUNTIME="$1" - BUILD_CMD="$2" - START_CMD="$3" - COMP_DIR="$4" - INSTALL_CMD="$5" - - if [ "${RUNTIME^^}" == "NODEJS" ]; then - ssh root@10.15.0.5 " - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 && - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull' > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $INSTALL_CMD' && '$BUILD_CMD' > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - " - elif [ "${RUNTIME^^}" == "PYTHON" ]; then - ssh root@10.15.0.5 " - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 && - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull' > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $INSTALL_CMD' && '$BUILD_CMD' > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - " - fi -} - -if [ "${MULTI_COMPONENT^^}" == "Y" ]; then - for COMPONENT in $(echo "$START_COMMAND" | jq -r 'keys[]'); do - START=$(echo "$START_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - RUNTIME=$(echo "$RUNTIME_LANGUAGE" | jq -r --arg k "$COMPONENT" '.[$k]') - BUILD=$(echo "$BUILD_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - INSTALL=$(echo "$INSTALL_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - if [ "$BUILD" == "null" ]; then - BUILD="" - fi - - if (( "$CONTAINER_ID" % 2 == 0 )); then - startComponentPVE2 "$RUNTIME" "$BUILD" 
"$START" "$COMPONENT" "$INSTALL" - else - startComponentPVE1 "$RUNTIME" "$BUILD" "$START" "$COMPONENT" "$INSTALL" - fi - done - if [ ! -z "$ROOT_START_COMMAND" ]; then - if (( $CONTAINER_ID % 2 == 0 )); then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND'" - else - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND" - fi - fi - # startComponent "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." -else - if (( $CONTAINER_ID % 2 == 0 )); then - startComponentPVE2 "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." "$INSTALL_COMMAND" - if [ ! -z "$ROOT_START_COMMAND" ]; then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND'" > /dev/null 2>&1 - fi - else - startComponentPVE1 "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." "$INSTALL_COMMAND" - if [ ! -z "$ROOT_START_COMMAND" ]; then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND'" > /dev/null 2>&1 - fi - fi -fi - -# Update Log File - -if (( "$CONTAINER_ID" % 2 == 0 )); then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'echo \"[$(date)]\" >> /root/container-updates.log'" -else - pct exec $CONTAINER_ID -- bash -c "echo \"[$(date)]\" >> /root/container-updates.log" -fi - -# Create new template if on default branch ===== - -UPDATE_CONTAINER="true" -BUILD_COMMAND_B64=$(echo -n "$BUILD_COMMAND" | base64) -RUNTIME_LANGUAGE_B64=$(echo -n "$RUNTIME_LANGUAGE" | base64) -START_COMMAND_B64=$(echo -n "$START_COMMAND" | base64) - -CMD=( -bash /var/lib/vz/snippets/start_services.sh -"$CONTAINER_ID" -"$CONTAINER_NAME" -"$REPO_BASE_NAME" -"$REPO_BASE_NAME_WITH_OWNER" -"$SSH_PORT" -"$CONTAINER_IP" -"$PROJECT_ROOT" -"$ROOT_START_COMMAND" -"$DEPLOY_ON_START" -"$MULTI_COMPONENT" -"$START_COMMAND_B64" -"$BUILD_COMMAND_B64" -"$RUNTIME_LANGUAGE_B64" -"$GH_ACTION" -"$PROJECT_BRANCH" -"$UPDATE_CONTAINER" -) - -# Safely quote each argument for the shell -QUOTED_CMD=$(printf ' %q' "${CMD[@]}") - -tmux new-session -d -s "$CONTAINER_NAME" "$QUOTED_CMD" -echo "✅ Container $CONTAINER_ID has been updated with new contents from branch \"$PROJECT_BRANCH\" on repository \"$PROJECT_REPOSITORY\"." -echo "Wait a few minutes for all background processes to complete before accessing the container." 
-exit 0 diff --git a/create-a-container/bin/create-container.js b/create-a-container/bin/create-container.js index b9bba84e..7686e68e 100755 --- a/create-a-container/bin/create-container.js +++ b/create-a-container/bin/create-container.js @@ -25,7 +25,6 @@ */ const path = require('path'); -const https = require('https'); // Load models from parent directory const db = require(path.join(__dirname, '..', 'models')); @@ -33,183 +32,7 @@ const { Container, Node, Site } = db; // Load utilities const { parseArgs } = require(path.join(__dirname, '..', 'utils', 'cli')); - -/** - * Low-level HTTP GET that returns status, headers, and body without throwing on 4xx - * @param {string} url - The URL to fetch - * @param {object} headers - Optional request headers - * @returns {Promise<{statusCode: number, headers: object, body: string}>} - */ -function httpGet(url, headers = {}) { - return new Promise((resolve, reject) => { - const req = https.get(url, { headers }, (res) => { - let data = ''; - res.on('data', chunk => data += chunk); - res.on('end', () => { - resolve({ statusCode: res.statusCode, headers: res.headers, body: data }); - }); - }); - req.on('error', reject); - }); -} - -/** - * Fetch JSON from a URL with optional headers (throws on non-2xx) - * @param {string} url - The URL to fetch - * @param {object} headers - Optional headers - * @returns {Promise} Parsed JSON response - */ -async function fetchJson(url, headers = {}) { - const res = await httpGet(url, headers); - if (res.statusCode >= 400) { - throw new Error(`HTTP ${res.statusCode}: ${res.body}`); - } - return JSON.parse(res.body); -} - -/** - * Parse a WWW-Authenticate Bearer challenge header - * Example: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/nginx:pull" - * @param {string} header - The WWW-Authenticate header value - * @returns {object|null} Parsed fields { realm, service, scope } or null if not Bearer - */ -function parseWwwAuthenticate(header) { - if (!header || !header.startsWith('Bearer ')) return null; - const params = {}; - const regex = /(\w+)="([^"]*)"/g; - let match; - while ((match = regex.exec(header)) !== null) { - params[match[1]] = match[2]; - } - return params; -} - -/** - * Fetch JSON from a registry URL with automatic token authentication - * Implements the Docker Registry Token Authentication spec: - * 1. Attempt the request - * 2. If 401, parse WWW-Authenticate for Bearer challenge - * 3. Request a token from the auth service - * 4. 
Retry with the Bearer token - * @param {string} url - The registry URL to fetch - * @param {object} headers - Optional request headers - * @returns {Promise} Parsed JSON response - */ -async function authenticatedFetchJson(url, headers = {}) { - const res = await httpGet(url, headers); - - if (res.statusCode === 200) { - return JSON.parse(res.body); - } - - if (res.statusCode !== 401) { - throw new Error(`HTTP ${res.statusCode}: ${res.body}`); - } - - // Parse the Bearer challenge from WWW-Authenticate header - const challenge = parseWwwAuthenticate(res.headers['www-authenticate']); - if (!challenge || !challenge.realm) { - throw new Error(`Registry returned 401 but no Bearer challenge in WWW-Authenticate header`); - } - - // Build token request URL with query parameters from the challenge - const tokenUrl = new URL(challenge.realm); - if (challenge.service) tokenUrl.searchParams.set('service', challenge.service); - if (challenge.scope) tokenUrl.searchParams.set('scope', challenge.scope); - - const tokenData = await fetchJson(tokenUrl.toString()); - if (!tokenData.token) { - throw new Error('Auth service did not return a token'); - } - - // Retry the original request with the Bearer token - headers['Authorization'] = `Bearer ${tokenData.token}`; - const retryRes = await httpGet(url, headers); - if (retryRes.statusCode >= 400) { - throw new Error(`HTTP ${retryRes.statusCode} after auth: ${retryRes.body}`); - } - return JSON.parse(retryRes.body); -} - -/** - * Get the digest (sha256 hash) of a Docker/OCI image from the registry - * Handles both single-arch and multi-arch (manifest list) images - * @param {string} registry - Registry hostname (e.g., 'docker.io') - * @param {string} repo - Repository (e.g., 'library/nginx') - * @param {string} tag - Tag (e.g., 'latest') - * @returns {Promise} Short digest (first 12 chars of sha256) - */ -async function getImageDigest(registry, repo, tag) { - const registryHost = registry === 'docker.io' ? 'registry-1.docker.io' : registry; - - // Fetch manifest with automatic registry auth challenge-response - const acceptHeaders = { - 'Accept': [ - 'application/vnd.docker.distribution.manifest.v2+json', - 'application/vnd.oci.image.manifest.v1+json', - 'application/vnd.docker.distribution.manifest.list.v2+json', - 'application/vnd.oci.image.index.v1+json' - ].join(', ') - }; - - const manifestUrl = `https://${registryHost}/v2/${repo}/manifests/${tag}`; - let manifest = await authenticatedFetchJson(manifestUrl, { ...acceptHeaders }); - - // Handle manifest list (multi-arch) - select amd64/linux - if (manifest.manifests && Array.isArray(manifest.manifests)) { - const amd64Manifest = manifest.manifests.find(m => - m.platform?.architecture === 'amd64' && m.platform?.os === 'linux' - ); - if (!amd64Manifest) { - throw new Error('No amd64/linux manifest found in manifest list'); - } - - // Fetch the actual manifest for amd64 (reuse same auth flow) - const archHeaders = { - 'Accept': 'application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.manifest.v1+json' - }; - const archManifestUrl = `https://${registryHost}/v2/${repo}/manifests/${amd64Manifest.digest}`; - manifest = await authenticatedFetchJson(archManifestUrl, { ...archHeaders }); - } - - // Get config digest from manifest - const configDigest = manifest.config?.digest; - if (!configDigest) { - throw new Error('No config digest in manifest'); - } - - // Return short hash (sha256:abc123... -> abc123...) 
- const hash = configDigest.replace('sha256:', ''); - return hash.substring(0, 12); -} - -/** - * Check if a template is a Docker image reference (contains '/') - * @param {string} template - The template string - * @returns {boolean} True if Docker image, false if Proxmox template - */ -function isDockerImage(template) { - return template.includes('/'); -} - -/** - * Parse a normalized Docker image reference into components - * Format: host/org/image:tag - * @param {string} ref - The normalized Docker reference - * @returns {object} Parsed components: { registry, namespace, image, tag } - */ -function parseDockerRef(ref) { - // Split off tag - const [imagePart, tag] = ref.split(':'); - const parts = imagePart.split('/'); - - // Format is always host/org/image after normalization - const registry = parts[0]; - const image = parts[parts.length - 1]; - const namespace = parts.slice(1, -1).join('/'); - - return { registry, namespace, image, tag }; -} +const { isDockerImage, parseDockerRef, getImageDigest } = require(path.join(__dirname, '..', 'utils', 'docker-registry')); /** * Generate a filename for a pulled Docker image @@ -225,6 +48,62 @@ function generateImageFilename(parsed, digest) { return sanitized; } +/** + * Setup ACL for container owner + * Grants PVEVMUser role to username@ldap on /vms/{vmid} + * Non-blocking: logs errors but continues on failure + * @param {ProxmoxApi} client - Proxmox API client + * @param {string} nodeName - Node name for logging + * @param {number} vmid - Container VMID + * @param {string} username - Container owner username + * @returns {Promise} True if ACL created successfully, false otherwise + */ +async function setupContainerAcl(client, nodeName, vmid, username) { + const userWithRealm = `${username}@ldap`; + const aclPath = `/vms/${vmid}`; + + console.log(`Setting up ACL for ${userWithRealm} on ${aclPath}...`); + + try { + // Attempt to create ACL + await client.updateAcl(aclPath, 'PVEVMUser', null, true, null, userWithRealm); + console.log(`ACL created successfully: ${userWithRealm} -> PVEVMUser on ${aclPath}`); + return true; + } catch (firstError) { + console.log(`ACL creation failed: ${firstError.message}`); + + // Check if error is due to user not existing + const errorMsg = firstError.response?.data?.errors || firstError.message || ''; + const isUserNotFound = errorMsg.toLowerCase().includes('user') && + (errorMsg.toLowerCase().includes('not found') || + errorMsg.toLowerCase().includes('does not exist')); + + if (isUserNotFound) { + console.log('User not found in Proxmox LDAP realm, attempting LDAP sync...'); + + try { + // Sync LDAP realm + await client.syncLdapRealm('ldap'); + console.log('LDAP realm sync completed successfully'); + + // Retry ACL creation + console.log('Retrying ACL creation...'); + await client.updateAcl(aclPath, 'PVEVMUser', null, true, null, userWithRealm); + console.log(`ACL created successfully after sync: ${userWithRealm} -> PVEVMUser on ${aclPath}`); + return true; + } catch (syncError) { + console.log(`LDAP sync or retry failed: ${syncError.message}`); + console.log('Continuing without ACL - container owner will need manual access grant'); + return false; + } + } else { + console.log('ACL creation failed for non-user-related reason'); + console.log('Continuing without ACL - container owner will need manual access grant'); + return false; + } + } +} + /** * Main function */ @@ -446,6 +325,9 @@ async function main() { console.log('Environment/entrypoint configuration applied'); } + // Setup ACL for container owner + 
await setupContainerAcl(client, node.name, vmid, container.username);
+
     // Store the VMID now that creation succeeded
     await container.update({ containerId: vmid });
     console.log(`Container VMID ${vmid} stored in database`);
diff --git a/create-a-container/compose.ldap.env b/create-a-container/compose.ldap.env
new file mode 100644
index 00000000..397bf5d2
--- /dev/null
+++ b/create-a-container/compose.ldap.env
@@ -0,0 +1,10 @@
+LOG_LEVEL=debug
+DIRECTORY_BACKEND=sql
+REQUIRE_AUTH_FOR_SEARCH=false
+AUTH_BACKENDS=sql
+LDAP_COMMON_NAME=ldap
+LDAP_BASE_DN=dc=docker,dc=internal
+SQL_QUERY_ALL_USERS='SELECT "uid" AS username, "uidNumber" AS uid_number, "gidNumber" AS gid_number, "cn" AS full_name, "sn" AS last_name, "mail", "homeDirectory" AS home_directory, "userPassword" AS password, "givenName" as first_name FROM "Users"'
+SQL_QUERY_ONE_USER='SELECT "uid" AS username, "uidNumber" AS uid_number, "gidNumber" AS gid_number, "cn" AS full_name, "sn" AS last_name, "mail", "homeDirectory" AS home_directory, "userPassword" AS password, "givenName" as first_name FROM "Users" WHERE "uid" = ?'
+SQL_QUERY_ALL_GROUPS='SELECT g."cn" AS name, g."gidNumber" AS gid_number FROM "Groups" g'
+SQL_QUERY_GROUPS_BY_MEMBER='SELECT g."cn" AS name, g."gidNumber" AS gid_number FROM "Groups" g INNER JOIN "UserGroups" ug ON g."gidNumber" = ug."gidNumber" INNER JOIN "Users" u ON ug."uidNumber" = u."uidNumber" WHERE u."uid" = ?'
diff --git a/create-a-container/compose.yml b/create-a-container/compose.yml
index d3a6a344..8308c6c5 100644
--- a/create-a-container/compose.yml
+++ b/create-a-container/compose.yml
@@ -11,5 +11,11 @@ services:
     ports:
       - 5432:5432

+  ldap:
+    image: ghcr.io/mieweb/ldap-gateway:latest
+    env_file: compose.ldap.env
+    environment:
+      SQL_URI: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DATABASE}
+
 volumes:
-  postgres-data:
\ No newline at end of file
+  postgres-data:
diff --git a/create-a-container/routers/containers.js b/create-a-container/routers/containers.js
index 120003ea..ab2639da 100644
--- a/create-a-container/routers/containers.js
+++ b/create-a-container/routers/containers.js
@@ -7,6 +7,7 @@ const { requireAuth } = require('../middlewares');
 const ProxmoxApi = require('../utils/proxmox-api');
 const serviceMap = require('../data/services.json');
 const { isApiRequest } = require('../utils/http');
+const { parseDockerRef, getImageConfig, extractImageMetadata } = require('../utils/docker-registry');

 /**
  * Normalize a Docker image reference to full format: host/org/image:tag
@@ -54,6 +55,48 @@ function normalizeDockerRef(ref) {
   return `${host}/${org}/${image}:${tag}`;
 }

+// GET /sites/:siteId/containers/metadata - Fetch Docker image metadata
+router.get('/metadata', requireAuth, async (req, res) => {
+  try {
+    const { image } = req.query;
+
+    if (!image || !image.trim()) {
+      return res.status(400).json({ error: 'Image parameter is required' });
+    }
+
+    // Normalize the image reference
+    const normalizedImage = normalizeDockerRef(image.trim());
+
+    // Parse into components
+    const parsed = parseDockerRef(normalizedImage);
+    const repo = `${parsed.namespace}/${parsed.image}`;
+
+    // Fetch image config from registry
+    const config = await getImageConfig(parsed.registry, repo, parsed.tag);
+
+    // Extract metadata
+    const metadata = extractImageMetadata(config);
+
+    return res.json(metadata);
+  } catch (err) {
+    console.error('Error fetching image metadata:', err);
+
+    let errorMessage = 'Failed to fetch image metadata';
+    if (err.message.includes('HTTP 404')) {
+      errorMessage
= 'Image not found in registry'; + } else if (err.message.includes('timeout')) { + errorMessage = 'Request timed out. Registry may be unavailable.'; + } else if (err.message.includes('auth')) { + errorMessage = 'Authentication failed. Image may be private.'; + } + + return res.status(500).json({ + error: errorMessage, + details: err.message + }); + } +}); + // GET /sites/:siteId/containers/new - List available templates via API or HTML form router.get('/new', requireAuth, async (req, res) => { const siteId = parseInt(req.params.siteId, 10); @@ -67,38 +110,6 @@ router.get('/new', requireAuth, async (req, res) => { return res.redirect('/sites'); } - // Get valid container templates from all nodes in this site - const templates = []; - const nodes = await Node.findAll({ - where: { - [Sequelize.Op.and]: { - siteId, - apiUrl: { [Sequelize.Op.ne]: null }, - tokenId: { [Sequelize.Op.ne]: null }, - secret: { [Sequelize.Op.ne]: null } - } - }, - }); - - for (const node of nodes) { - try { - const client = await node.api(); - const lxcTemplates = await client.getLxcTemplates(node.name); - - for (const lxc of lxcTemplates) { - templates.push({ - // Proxmox usually returns 'name' (filename) or 'volid' - name: lxc.name || lxc.volid, - vmid: lxc.vmid, - size: lxc.size, - node: node.name - }); - } - } catch (err) { - console.error(`Error fetching templates from node ${node.name}:`, err.message); - } - } - // Get external domains for this site const externalDomains = await ExternalDomain.findAll({ where: { siteId }, @@ -108,7 +119,6 @@ router.get('/new', requireAuth, async (req, res) => { if (isApi) { return res.json({ site_id: site.id, - templates: templates, domains: externalDomains }); } @@ -116,7 +126,6 @@ router.get('/new', requireAuth, async (req, res) => { return res.render('containers/form', { site, - templates, externalDomains, container: undefined, req @@ -687,4 +696,4 @@ router.delete('/:id', requireAuth, async (req, res) => { return res.redirect(`/sites/${siteId}/containers`); }); -module.exports = router; \ No newline at end of file +module.exports = router; diff --git a/create-a-container/routers/register.js b/create-a-container/routers/register.js index 6eb88cdc..e1afb524 100644 --- a/create-a-container/routers/register.js +++ b/create-a-container/routers/register.js @@ -61,15 +61,18 @@ router.post('/', async (req, res) => { status = 'pending'; // Regular registrations are pending } + const givenName = req.body.givenName.trim(); + const sn = req.body.sn.trim(); + const userParams = { uidNumber: await User.nextUidNumber(), uid: req.body.uid, - sn: req.body.sn, - givenName: req.body.givenName, + sn, + givenName, mail: req.body.mail, userPassword: req.body.userPassword, status, - cn: `${req.body.givenName} ${req.body.sn}`, + cn: `${givenName} ${sn}`, homeDirectory: `/home/${req.body.uid}`, }; diff --git a/create-a-container/routers/sites.js b/create-a-container/routers/sites.js index 0824deae..cf1ea5aa 100644 --- a/create-a-container/routers/sites.js +++ b/create-a-container/routers/sites.js @@ -168,8 +168,9 @@ router.get('/:siteId/ldap.conf', requireLocalhost, async (req, res) => { ${qi.quoteIdentifier('uid')} AS username, ${qi.quoteIdentifier('uidNumber')} AS uid_number, ${qi.quoteIdentifier('gidNumber')} AS gid_number, + ${qi.quoteIdentifier('givenName')} AS first_name, ${qi.quoteIdentifier('cn')} AS full_name, - ${qi.quoteIdentifier('sn')} AS surname, + ${qi.quoteIdentifier('sn')} AS last_name, ${qi.quoteIdentifier('mail')}, ${qi.quoteIdentifier('homeDirectory')} AS home_directory, 
${qi.quoteIdentifier('userPassword')} AS password diff --git a/create-a-container/routers/users.js b/create-a-container/routers/users.js index a6afe0b0..3ca642c3 100644 --- a/create-a-container/routers/users.js +++ b/create-a-container/routers/users.js @@ -132,7 +132,10 @@ router.get('/:id/edit', async (req, res) => { // POST /users - Create a new user router.post('/', async (req, res) => { try { - const { uid, givenName, sn, mail, userPassword, status, groupIds } = req.body; + const { uid, givenName: rawGivenName, sn: rawSn, mail, userPassword, status, groupIds } = req.body; + + const givenName = rawGivenName.trim(); + const sn = rawSn.trim(); const user = await User.create({ uidNumber: await User.nextUidNumber(), @@ -178,7 +181,10 @@ router.put('/:id', async (req, res) => { return res.redirect('/users'); } - const { uid, givenName, sn, mail, userPassword, status, groupIds } = req.body; + const { uid, givenName: rawGivenName, sn: rawSn, mail, userPassword, status, groupIds } = req.body; + + const givenName = rawGivenName.trim(); + const sn = rawSn.trim(); // Update user fields user.uid = uid; diff --git a/create-a-container/utils/docker-registry.js b/create-a-container/utils/docker-registry.js new file mode 100644 index 00000000..7afdc7b0 --- /dev/null +++ b/create-a-container/utils/docker-registry.js @@ -0,0 +1,380 @@ +/** + * docker-registry.js + * + * Utility functions for interacting with Docker/OCI registries. + * Implements the Docker Registry HTTP API V2 specification. + * Supports automatic token-based authentication challenges. + */ + +const https = require('https'); + +/** + * Low-level HTTP GET that returns status, headers, and body without throwing on 4xx + * @param {string} url - The URL to fetch + * @param {object} headers - Optional request headers + * @returns {Promise<{statusCode: number, headers: object, body: string}>} + */ +function httpGet(url, headers = {}, redirectCount = 0) { + const MAX_REDIRECTS = 5; + + return new Promise((resolve, reject) => { + if (redirectCount > MAX_REDIRECTS) { + return reject(new Error('Too many redirects')); + } + + let timedOut = false; + const req = https.get(url, { headers }, (res) => { + // Handle redirects + if (res.statusCode === 301 || res.statusCode === 302 || res.statusCode === 307 || res.statusCode === 308) { + const location = res.headers.location; + if (!location) { + return reject(new Error(`Redirect without Location header (status ${res.statusCode})`)); + } + // Follow redirect (without auth headers for CDN) + return httpGet(location, {}, redirectCount + 1) + .then(resolve) + .catch(reject); + } + + let data = ''; + res.on('data', chunk => data += chunk); + res.on('end', () => { + if (timedOut) return; + resolve({ statusCode: res.statusCode, headers: res.headers, body: data }); + }); + }); + req.on('error', (err) => { + if (!timedOut) reject(err); + }); + req.setTimeout(120000, () => { + timedOut = true; + req.destroy(); + reject(new Error('Request timeout after 120 seconds')); + }); + }); +} + +/** + * Fetch JSON from a URL with optional headers (throws on non-2xx) + * @param {string} url - The URL to fetch + * @param {object} headers - Optional headers + * @returns {Promise} Parsed JSON response + */ +async function fetchJson(url, headers = {}) { + const res = await httpGet(url, headers); + if (res.statusCode >= 400) { + throw new Error(`HTTP ${res.statusCode}: ${res.body}`); + } + if (!res.body || res.body.trim() === '') { + throw new Error('Empty response body'); + } + try { + return JSON.parse(res.body); + } catch (err) 
{ + throw new Error(`Failed to parse JSON: ${err.message}. Body: ${res.body.substring(0, 200)}`); + } +} + +/** + * Parse a WWW-Authenticate Bearer challenge header + * Example: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/nginx:pull" + * @param {string} header - The WWW-Authenticate header value + * @returns {object|null} Parsed fields { realm, service, scope } or null if not Bearer + */ +function parseWwwAuthenticate(header) { + if (!header || !header.startsWith('Bearer ')) return null; + const params = {}; + const regex = /(\w+)="([^"]*)"/g; + let match; + while ((match = regex.exec(header)) !== null) { + params[match[1]] = match[2]; + } + return params; +} + +/** + * Fetch JSON from a registry URL with automatic token authentication + * Implements the Docker Registry Token Authentication spec: + * 1. Attempt the request + * 2. If 401, parse WWW-Authenticate for Bearer challenge + * 3. Request a token from the auth service + * 4. Retry with the Bearer token + * @param {string} url - The registry URL to fetch + * @param {object} headers - Optional request headers + * @returns {Promise} Parsed JSON response + */ +async function authenticatedFetchJson(url, headers = {}) { + const res = await httpGet(url, headers); + + if (res.statusCode === 200) { + if (!res.body || res.body.trim() === '') { + throw new Error('Empty response body from registry'); + } + try { + return JSON.parse(res.body); + } catch (err) { + throw new Error(`Failed to parse JSON response: ${err.message}`); + } + } + + if (res.statusCode !== 401) { + throw new Error(`HTTP ${res.statusCode}: ${res.body}`); + } + + // Parse the Bearer challenge from WWW-Authenticate header + const challenge = parseWwwAuthenticate(res.headers['www-authenticate']); + if (!challenge || !challenge.realm) { + throw new Error(`Registry returned 401 but no Bearer challenge in WWW-Authenticate header`); + } + + // Build token request URL with query parameters from the challenge + const tokenUrl = new URL(challenge.realm); + if (challenge.service) tokenUrl.searchParams.set('service', challenge.service); + if (challenge.scope) tokenUrl.searchParams.set('scope', challenge.scope); + + const tokenData = await fetchJson(tokenUrl.toString()); + if (!tokenData.token) { + throw new Error('Auth service did not return a token'); + } + + // Retry the original request with the Bearer token + headers['Authorization'] = `Bearer ${tokenData.token}`; + const retryRes = await httpGet(url, headers); + if (retryRes.statusCode >= 400) { + throw new Error(`HTTP ${retryRes.statusCode} after auth: ${retryRes.body}`); + } + if (!retryRes.body || retryRes.body.trim() === '') { + throw new Error('Empty response body after auth'); + } + try { + return JSON.parse(retryRes.body); + } catch (err) { + throw new Error(`Failed to parse authenticated response: ${err.message}`); + } +} + +/** + * Check if a template is a Docker image reference (contains '/') + * @param {string} template - The template string + * @returns {boolean} True if Docker image, false if Proxmox template + */ +function isDockerImage(template) { + return template.includes('/'); +} + +/** + * Parse a normalized Docker image reference into components + * Format: host/org/image:tag + * @param {string} ref - The normalized Docker reference + * @returns {object} Parsed components: { registry, namespace, image, tag } + */ +function parseDockerRef(ref) { + // Split off tag + const [imagePart, tag] = ref.split(':'); + const parts = imagePart.split('/'); + + // Format 
is always host/org/image after normalization + const registry = parts[0]; + const image = parts[parts.length - 1]; + const namespace = parts.slice(1, -1).join('/'); + + return { registry, namespace, image, tag }; +} + +/** + * Get the digest (sha256 hash) of a Docker/OCI image from the registry + * Handles both single-arch and multi-arch (manifest list) images + * @param {string} registry - Registry hostname (e.g., 'docker.io') + * @param {string} repo - Repository (e.g., 'library/nginx') + * @param {string} tag - Tag (e.g., 'latest') + * @returns {Promise} Short digest (first 12 chars of sha256) + */ +async function getImageDigest(registry, repo, tag) { + const registryHost = registry === 'docker.io' ? 'registry-1.docker.io' : registry; + + // Fetch manifest with automatic registry auth challenge-response + const acceptHeaders = { + 'Accept': [ + 'application/vnd.docker.distribution.manifest.v2+json', + 'application/vnd.oci.image.manifest.v1+json', + 'application/vnd.docker.distribution.manifest.list.v2+json', + 'application/vnd.oci.image.index.v1+json' + ].join(', ') + }; + + const manifestUrl = `https://${registryHost}/v2/${repo}/manifests/${tag}`; + let manifest = await authenticatedFetchJson(manifestUrl, { ...acceptHeaders }); + + // Handle manifest list (multi-arch) - select amd64/linux + if (manifest.manifests && Array.isArray(manifest.manifests)) { + const amd64Manifest = manifest.manifests.find(m => + m.platform?.architecture === 'amd64' && m.platform?.os === 'linux' + ); + if (!amd64Manifest) { + throw new Error('No amd64/linux manifest found in manifest list'); + } + + // Fetch the actual manifest for amd64 (reuse same auth flow) + const archHeaders = { + 'Accept': 'application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.manifest.v1+json' + }; + const archManifestUrl = `https://${registryHost}/v2/${repo}/manifests/${amd64Manifest.digest}`; + manifest = await authenticatedFetchJson(archManifestUrl, { ...archHeaders }); + } + + // Get config digest from manifest + const configDigest = manifest.config?.digest; + if (!configDigest) { + throw new Error('No config digest in manifest'); + } + + // Return short hash (sha256:abc123... -> abc123...) + const hash = configDigest.replace('sha256:', ''); + return hash.substring(0, 12); +} + +/** + * Fetch the image configuration blob from the registry + * Contains metadata like EXPOSE, ENV, ENTRYPOINT, CMD + * @param {string} registry - Registry hostname (e.g., 'docker.io') + * @param {string} repo - Repository (e.g., 'library/nginx') + * @param {string} tag - Tag (e.g., 'latest') + * @returns {Promise} Image config object + */ +async function getImageConfig(registry, repo, tag) { + const registryHost = registry === 'docker.io' ? 
'registry-1.docker.io' : registry; + + // First, fetch the manifest to get the config digest + const acceptHeaders = { + 'Accept': [ + 'application/vnd.docker.distribution.manifest.v2+json', + 'application/vnd.oci.image.manifest.v1+json', + 'application/vnd.docker.distribution.manifest.list.v2+json', + 'application/vnd.oci.image.index.v1+json' + ].join(', ') + }; + + const manifestUrl = `https://${registryHost}/v2/${repo}/manifests/${tag}`; + let manifest = await authenticatedFetchJson(manifestUrl, { ...acceptHeaders }); + + // Handle manifest list (multi-arch) - select amd64/linux + if (manifest.manifests && Array.isArray(manifest.manifests)) { + const amd64Manifest = manifest.manifests.find(m => + m.platform?.architecture === 'amd64' && m.platform?.os === 'linux' + ); + if (!amd64Manifest) { + throw new Error('No amd64/linux manifest found in manifest list'); + } + + const archHeaders = { + 'Accept': 'application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.manifest.v1+json' + }; + const archManifestUrl = `https://${registryHost}/v2/${repo}/manifests/${amd64Manifest.digest}`; + manifest = await authenticatedFetchJson(archManifestUrl, { ...archHeaders }); + } + + // Get config digest from manifest + const configDigest = manifest.config?.digest; + if (!configDigest) { + throw new Error('No config digest in manifest'); + } + + // Fetch the config blob + const blobUrl = `https://${registryHost}/v2/${repo}/blobs/${configDigest}`; + const config = await authenticatedFetchJson(blobUrl); + + return config; +} + +/** + * Extract metadata from image config for container creation + * @param {object} config - Raw image config from registry + * @returns {object} Structured metadata: { ports, httpServices, env, entrypoint } + */ +function extractImageMetadata(config) { + const metadata = { + ports: [], + httpServices: [], + env: {}, + entrypoint: '' + }; + + // Extract HTTP service from OCI labels first + // Label: org.mieweb.opensource-server.services.http.default-port + let httpServicePort = null; + if (config.config?.Labels) { + const httpPortLabel = config.config.Labels['org.mieweb.opensource-server.services.http.default-port']; + if (httpPortLabel) { + const port = parseInt(httpPortLabel, 10); + if (!isNaN(port) && port > 0 && port <= 65535) { + httpServicePort = port; + metadata.httpServices.push({ + port: port + }); + } + } + } + + // Extract exposed ports (excluding HTTP service port on TCP to avoid duplicates) + // Format: { "80/tcp": {}, "443/tcp": {}, "8080/udp": {} } + if (config.config?.ExposedPorts) { + for (const portSpec of Object.keys(config.config.ExposedPorts)) { + const [port, protocol = 'tcp'] = portSpec.split('/'); + const portNum = parseInt(port, 10); + + // Skip if this port is designated as an HTTP service AND it's TCP + // (HTTP runs over TCP, but keep UDP ports even if same number) + if (portNum === httpServicePort && protocol.toLowerCase() === 'tcp') { + continue; + } + + metadata.ports.push({ + port: portNum, + protocol: protocol.toLowerCase() + }); + } + } + + // Extract environment variables + // Format: ["KEY1=value1", "KEY2=value2", "PATH=/usr/bin"] + const skipEnvVars = new Set(['PATH', 'HOME', 'HOSTNAME', 'TERM', 'USER']); + if (config.config?.Env && Array.isArray(config.config.Env)) { + for (const envStr of config.config.Env) { + const eqIndex = envStr.indexOf('='); + if (eqIndex > 0) { + const key = envStr.substring(0, eqIndex); + const value = envStr.substring(eqIndex + 1); + if (!skipEnvVars.has(key)) { + metadata.env[key] = value; + } + } 
+ } + } + + // Extract and concatenate ENTRYPOINT + CMD + // Both are arrays of strings + const entrypointParts = []; + if (config.config?.Entrypoint && Array.isArray(config.config.Entrypoint)) { + entrypointParts.push(...config.config.Entrypoint); + } + if (config.config?.Cmd && Array.isArray(config.config.Cmd)) { + entrypointParts.push(...config.config.Cmd); + } + if (entrypointParts.length > 0) { + metadata.entrypoint = entrypointParts.join(' '); + } + + return metadata; +} + +module.exports = { + httpGet, + fetchJson, + parseWwwAuthenticate, + authenticatedFetchJson, + isDockerImage, + parseDockerRef, + getImageDigest, + getImageConfig, + extractImageMetadata +}; diff --git a/create-a-container/utils/proxmox-api.js b/create-a-container/utils/proxmox-api.js index 505c8fbe..8a287a13 100644 --- a/create-a-container/utils/proxmox-api.js +++ b/create-a-container/utils/proxmox-api.js @@ -81,7 +81,7 @@ class ProxmoxApi { * @param {string} path * @param {string} roles * @param {string|null} groups - * @param {boolean} propogate + * @param {boolean} propagate * @param {string|null} tokens * @param {string|null} users * @returns {Promise} @@ -94,6 +94,20 @@ class ProxmoxApi { ); } + /** + * Sync LDAP realm to update user list from LDAP server + * @param {string} realm - The realm name (e.g., 'ldap') + * @returns {Promise} + */ + async syncLdapRealm(realm = 'ldap') { + const response = await axios.post( + `${this.baseUrl}/api2/json/access/domains/${realm}/sync`, + {}, + this.options + ); + return response.data.data; + } + /** * Get the list of Proxmox Nodes * @returns {Promise} - The API response data diff --git a/create-a-container/views/containers/form.ejs b/create-a-container/views/containers/form.ejs index e1d57637..af72f82a 100644 --- a/create-a-container/views/containers/form.ejs +++ b/create-a-container/views/containers/form.ejs @@ -38,13 +38,8 @@ const breadcrumbLabel = isEdit ? 'Edit' : 'New'; <% } else { %>

@@ -319,7 +316,7 @@ function ProxmoxLaunchpad({Button, Icon, FaGithub, FaRocket, FaCheckCircle, FaCo - +
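For reference, the example challenge quoted in the parseWwwAuthenticate doc comment above parses to a plain object. The following is an illustrative sketch of the expected output, not a test taken from this diff:

// Input: the WWW-Authenticate value from the doc-comment example
parseWwwAuthenticate('Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/nginx:pull"');
// => {
//   realm: 'https://auth.docker.io/token',
//   service: 'registry.docker.io',
//   scope: 'repository:library/nginx:pull'
// }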
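The registry helpers above are exported individually; a minimal sketch of how a caller might compose them to describe an image follows. The require path and the example reference are assumptions for illustration — only parseDockerRef, getImageDigest, getImageConfig, and extractImageMetadata (and their signatures) come from the module in this diff:

// Illustrative only: resolve the short digest and container-creation metadata
// for a normalized image reference (host/org/image:tag).
const registry = require('./utils/docker-registry'); // module path assumed

async function describeImage(ref) {
  const { registry: host, namespace, image, tag } = registry.parseDockerRef(ref);
  const repo = namespace ? `${namespace}/${image}` : image;

  // First 12 characters of the config sha256; manifest lists are resolved to amd64/linux
  const digest = await registry.getImageDigest(host, repo, tag);

  // EXPOSE, ENV, and ENTRYPOINT+CMD extracted from the config blob
  const config = await registry.getImageConfig(host, repo, tag);
  const metadata = registry.extractImageMetadata(config);

  return { digest, ...metadata };
}

// Example call with an assumed reference:
// describeImage('docker.io/library/nginx:latest').then(console.log).catch(console.error);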
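Similarly, the new ProxmoxApi.syncLdapRealm method added in this diff takes a realm name that defaults to 'ldap'. A hedged call sketch, assuming a ProxmoxApi instance has already been constructed elsewhere (its constructor is outside this diff):

// Illustrative only: trigger an LDAP realm sync so newly added users appear in Proxmox.
// `proxmoxApi` is assumed to be an already-configured ProxmoxApi instance.
async function refreshLdapUsers(proxmoxApi) {
  // POSTs to /api2/json/access/domains/ldap/sync and returns response.data.data
  return proxmoxApi.syncLdapRealm('ldap');
}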