diff --git a/.github/actions/check-changes/action.yml b/.github/actions/check-changes/action.yml index 01e11934d..a4438bf2c 100644 --- a/.github/actions/check-changes/action.yml +++ b/.github/actions/check-changes/action.yml @@ -17,13 +17,13 @@ runs: run: | changes="" if [ "${{ github.event_name }}" = "pull_request" ]; then - echo "Checking pull request changes between HEAD and origin/${{ github.base_ref }}" + echo "Checking pull request changes between HEAD and origin/${GITHUB_BASE_REF}" - changes="$(git diff 'origin/${{ github.base_ref }}..HEAD' --name-only '${{ inputs.glob }}')" + changes="$(git diff "origin/${GITHUB_BASE_REF}..HEAD" --name-only "${INPUTS_GLOB}")" else echo "Checking changes made by the last (merge) commit" - changes="$(git diff HEAD^ --name-only '${{ inputs.glob }}')" + changes="$(git diff HEAD^ --name-only "${INPUTS_GLOB}")" fi if [ "$changes" != "" ]; then @@ -34,3 +34,5 @@ runs: else echo "changes=false" >> "$GITHUB_OUTPUT" fi + env: + INPUTS_GLOB: ${{ inputs.glob }} diff --git a/.github/actions/system-test/action.yml b/.github/actions/system-test/action.yml index 0102430d4..081ccd716 100644 --- a/.github/actions/system-test/action.yml +++ b/.github/actions/system-test/action.yml @@ -16,17 +16,17 @@ runs: using: composite steps: - name: Tune disk performance - uses: canonical/lxd/.github/actions/tune-disk-performance@main + uses: canonical/lxd/.github/actions/tune-disk-performance@main # zizmor: ignore[unpinned-uses] - name: Reclaim some space - uses: canonical/lxd/.github/actions/reclaim-disk-space@main + uses: canonical/lxd/.github/actions/reclaim-disk-space@main # zizmor: ignore[unpinned-uses] - name: Reclaim some memory - uses: canonical/lxd/.github/actions/reclaim-memory@main + uses: canonical/lxd/.github/actions/reclaim-memory@main # zizmor: ignore[unpinned-uses] - name: Disable Docker (GitHub runners) if: ${{ runner.environment == 'github-hosted' }} - uses: canonical/lxd/.github/actions/disable-docker@main + uses: 
canonical/lxd/.github/actions/disable-docker@main # zizmor: ignore[unpinned-uses] - name: "Disable br_netfilter" shell: bash @@ -39,6 +39,8 @@ runs: - name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false - name: Install Go uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 @@ -62,7 +64,7 @@ runs: - name: Sideload debug binaries shell: bash - run: | + run: | # zizmor: ignore[github-env] set -eux # Binaries to sideload @@ -81,7 +83,7 @@ runs: - name: "Free up the ephemeral disk" shell: bash - run: | + run: | # zizmor: ignore[github-env] set -eux if ! mountpoint --quiet /mnt; then @@ -129,7 +131,7 @@ runs: - name: "Prepare for system tests" shell: bash - run: | + run: | # zizmor: ignore[github-env] set -eux chmod +x ~ @@ -140,7 +142,7 @@ runs: export MICROCLOUD_SNAP_CHANNEL="${{ matrix.microcloud }}" cd test - if [ "${{ inputs.setup_testbed }}" = "false" ]; then + if [ "${INPUTS_SETUP_TESTBED}" = "false" ]; then echo "Skipping testbed setup" else sudo --preserve-env=GOCOVERDIR,DEBUG,GITHUB_ACTIONS,MICROCLOUD_DEBUG_PATH,MICROCLOUDD_DEBUG_PATH,SKIP_VM_LAUNCH,SNAPSHOT_RESTORE,TEST_STORAGE_SOURCE,TESTBED_READY,BASE_OS,LXD_SNAP_CHANNEL,MICROCEPH_SNAP_CHANNEL,MICROOVN_SNAP_CHANNEL,MICROCLOUD_SNAP_CHANNEL ./main.sh setup @@ -151,6 +153,8 @@ runs: echo "MICROCEPH_SNAP_CHANNEL=${MICROCEPH_SNAP_CHANNEL}" >> "${GITHUB_ENV}" echo "MICROOVN_SNAP_CHANNEL=${MICROOVN_SNAP_CHANNEL}" >> "${GITHUB_ENV}" echo "MICROCLOUD_SNAP_CHANNEL=${MICROCLOUD_SNAP_CHANNEL}" >> "${GITHUB_ENV}" + env: + INPUTS_SETUP_TESTBED: ${{ inputs.setup_testbed }} - name: Setup Terraform if: ${{ inputs.setup_terraform == 'true' }} @@ -165,12 +169,12 @@ runs: - name: "Run system tests (${{ matrix.suite }})" shell: bash - run: | + run: | # zizmor: ignore[github-env] set -eux chmod +x ~ # Handle coverage collection based on input - if [ "${{ inputs.collect_coverage }}" = "false" ]; then + if [ "${INPUTS_COLLECT_COVERAGE}" = 
"false" ]; then echo "Coverage collection disabled for this test" export GOCOVERDIR='' fi @@ -178,6 +182,8 @@ runs: cd test sudo --preserve-env=GOCOVERDIR,DEBUG,GITHUB_ACTIONS,MICROCLOUD_DEBUG_PATH,MICROCLOUDD_DEBUG_PATH,SKIP_VM_LAUNCH,SNAPSHOT_RESTORE,TEST_STORAGE_SOURCE,TESTBED_READY,BASE_OS,LXD_SNAP_CHANNEL,MICROCEPH_SNAP_CHANNEL,MICROOVN_SNAP_CHANNEL,MICROCLOUD_SNAP_CHANNEL ./main.sh ${{ matrix.suite }} echo "TIMESTAMP=$(date +%Y%m%d_%H%M%S_%N)" >> "${GITHUB_ENV}" + env: + INPUTS_COLLECT_COVERAGE: ${{ inputs.collect_coverage }} - name: Upload coverage data uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c0d737424..7eb732cb0 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -50,6 +50,8 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml index eb56bf3c8..496fb5d1f 100644 --- a/.github/workflows/security.yml +++ b/.github/workflows/security.yml @@ -6,7 +6,9 @@ on: permissions: contents: read - security-events: write # for uploading SARIF results to the security tab + +env: + KEV_URL: https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }} @@ -19,109 +21,123 @@ defaults: jobs: trivy-repo: name: Trivy - Repository - runs-on: ubuntu-slim - if: ${{ github.ref_name == 'main' }} + runs-on: ubuntu-24.04 + permissions: + contents: read + security-events: write # for uploading SARIF results to the security tab + if: ${{ ( github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' ) && github.ref_name == 'main' && github.repository_owner == 'canonical' }} + env: + SARIF_FILE: trivy-${{ github.event.repository.name }}-repo-scan-results.sarif steps: - name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: ref: main - - - name: Install Trivy - uses: canonical/lxd/.github/actions/install-trivy@main - - - name: Download Trivy DB - id: db_download - run: trivy fs --download-db-only --cache-dir /home/runner/vuln-cache - continue-on-error: true - - - name: Cache Trivy vulnerability database - if: ${{ steps.db_download.outcome == 'success' }} - uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 - with: - path: /home/runner/vuln-cache - key: trivy-cache-${{ github.run_id }} - - - name: Use previously downloaded database instead - if: ${{ steps.db_download.outcome == 'failure' }} - uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 - with: - path: /home/runner/vuln-cache - key: download-failed # Use a non existing key to fallback to restore-keys - restore-keys: trivy-cache- + persist-credentials: false 
- name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0 + with: + scan-type: fs + scan-ref: . + scanners: vuln,secret,misconfig + format: sarif + severity: LOW,MEDIUM,HIGH,CRITICAL + output: ${{ env.SARIF_FILE }} + + - name: Tag KEV alerts run: | - trivy fs --skip-db-update \ - --scanners vuln,secret,misconfig \ - --format sarif \ - --cache-dir /home/runner/vuln-cache \ - --severity LOW,MEDIUM,HIGH,CRITICAL \ - --output trivy-microcloud-repo-scan-results.sarif . + set -euo pipefail + curl -s --compressed --proto '=https' --tlsv1.3 --fail --max-time 30 -o kev.json "${KEV_URL}" + kev_ids="$(jq -r '.vulnerabilities[].cveID' kev.json)" + jq --exit-status --arg ids "$kev_ids" '($ids | split("\n")) as $id_list | .runs[].tool.driver.rules[] |= ( + if (.id as $id | $id_list | index($id)) then + .shortDescription.text |= . + " (KEV)" + else + . + end + )' "${SARIF_FILE}" > trivy-modified.sarif + mv trivy-modified.sarif "${SARIF_FILE}" - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v3.29.5 + uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 with: - sarif_file: "trivy-microcloud-repo-scan-results.sarif" + sarif_file: ${{ env.SARIF_FILE }} sha: ${{ github.sha }} ref: refs/heads/main trivy-snap: name: Trivy - Snap - runs-on: ubuntu-slim - if: ${{ github.ref_name == 'main' }} + runs-on: ubuntu-24.04 needs: trivy-repo + permissions: + contents: read + security-events: write # for uploading SARIF results to the security tab + if: ${{ ( github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' ) && github.ref_name == 'main' && github.repository_owner == 'canonical' }} strategy: matrix: include: - channel: "3/edge" branch: "main" + version: "3" - channel: "2/stable" branch: "v2-edge" + version: "2" - channel: "1/stable" branch: "v1-edge" + version: "1" steps: - 
name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: ref: ${{ matrix.branch }} + persist-credentials: false - - name: Install Trivy - uses: canonical/lxd/.github/actions/install-trivy@main - - - name: Restore cached Trivy vulnerability database - uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 - with: - path: /home/runner/vuln-cache - key: download-failed # Use a non existing key to fallback to restore-keys - restore-keys: trivy-cache- - - - name: Install snapd - run: | - sudo apt-get install --no-install-recommends -y snapd + - name: Resolve branch HEAD SHA + id: branch-sha + run: echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" - name: Download snap for scan + env: + SNAP_NAME: ${{ github.event.repository.name }} run: | - snap download microcloud --channel=${{ matrix.channel }} - unsquashfs ./microcloud*.snap + snap download "${SNAP_NAME}" --channel=${{ matrix.version }}/stable --cohort="+" + unsquashfs ./${SNAP_NAME}*.snap - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0 + with: + scan-type: rootfs + scan-ref: squashfs-root + scanners: vuln,secret,misconfig + format: sarif + severity: LOW,MEDIUM,HIGH,CRITICAL + output: ${{ matrix.version }}-stable.sarif + + - name: Flag snap scanning alerts and tag KEV alerts run: | - trivy rootfs --skip-db-update \ - --scanners vuln,secret,misconfig \ - --format sarif \ - --cache-dir /home/runner/vuln-cache \ - --severity LOW,MEDIUM,HIGH,CRITICAL \ - --output snap-scan-results.sarif squashfs-root - - - name: Flag snap scanning alerts - run: | - jq '.runs[].tool.driver.rules[] |= (.shortDescription.text |= "Snap scan - " + .)' snap-scan-results.sarif > tmp.json - mv tmp.json snap-scan-results.sarif + set -euo pipefail + # Download KEV catalog + curl -s --compressed --proto '=https' --tlsv1.3 --fail --max-time 30 -o kev.json "${KEV_URL}" + kev_ids="$(jq -r 
'.vulnerabilities[].cveID' kev.json)" + # Modify the SARIF file to both add "Snap scan - " prefix and tag KEV alerts + jq --exit-status --arg ids "$kev_ids" ' + ($ids | split("\n")) as $id_list | + .runs[].tool.driver.rules[] |= ( + # First add the Snap scan prefix to all entries + .shortDescription.text = "Snap scan - " + .shortDescription.text | + # Then add KEV tag if applicable + if (.id as $id | $id_list | index($id)) then + .shortDescription.text |= . + " (KEV)" + else + . + end + )' ${{ matrix.version }}-stable.sarif > ${{ matrix.version }}-modified.sarif + mv ${{ matrix.version }}-modified.sarif ${{ matrix.version }}-stable.sarif - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v3.29.5 + uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 with: - sarif_file: "snap-scan-results.sarif" - sha: ${{ github.sha }} + sarif_file: ${{ matrix.version }}-stable.sarif + sha: ${{ steps.branch-sha.outputs.sha }} ref: refs/heads/${{ matrix.branch }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index ff9c4e0bb..bae93195f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -41,6 +41,7 @@ jobs: uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 + persist-credentials: false - name: Check for changes uses: ./.github/actions/check-changes @@ -59,9 +60,7 @@ jobs: with: # A non-shallow clone is needed for the Differential ShellCheck fetch-depth: 0 - - - name: Require GHA pinning - uses: canonical/lxd/.github/actions/require-gha-pinning@main + persist-credentials: false - name: Install Go uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 @@ -180,6 +179,8 @@ jobs: steps: - name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false - name: System test uses: 
./.github/actions/system-test @@ -202,7 +203,9 @@ jobs: matrix: suite: ["upgrade"] os: - - "22.04" + # Temporarily disable the upgrade test on 22.04 due to issues with latest MicroCeph and LXD using CephFS. + # See https://chat.canonical.com/canonical/pl/ix7oj94web8wmm4wdt7rqq1etc. + #- "22.04" - "24.04" microceph: - "reef/stable" @@ -213,6 +216,8 @@ jobs: steps: - name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false - name: System test uses: ./.github/actions/system-test @@ -230,6 +235,8 @@ jobs: steps: - name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false - name: Install Go uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 @@ -245,9 +252,9 @@ jobs: - name: Extract coverage data run: | - find ${{ env.GOCOVERDIR }}/micro*/cover/ -type f -exec mv {} ${{ env.GOCOVERDIR }} \; - rm -rf ${{ env.GOCOVERDIR }}/micro* - ls -la ${{ env.GOCOVERDIR }} + find "${GOCOVERDIR}"/micro*/cover/ -type f -exec mv {} "${GOCOVERDIR}" \; + rm -rf "${GOCOVERDIR}"/micro* + ls -la "${GOCOVERDIR}" - name: Download system test dependencies uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 @@ -286,7 +293,7 @@ jobs: doc-tests: name: Documentation - uses: canonical/documentation-workflows/.github/workflows/documentation-checks.yaml@main + uses: canonical/documentation-workflows/.github/workflows/documentation-checks.yaml@aaeaf091e8f55145184ad897cb9834f224bd31de # main with: working-directory: './doc' makefile: 'Makefile' @@ -309,8 +316,10 @@ jobs: steps: - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false - - uses: canonical/lxd/.github/actions/lp-snap-build@main + - uses: canonical/lxd/.github/actions/lp-snap-build@main # zizmor: ignore[unpinned-uses] with: ssh-key: "${{ secrets.LAUNCHPAD_LXD_BOT_KEY}}" diff --git 
a/.github/workflows/triage.yml b/.github/workflows/triage.yml index 879dc5dc4..aee200666 100644 --- a/.github/workflows/triage.yml +++ b/.github/workflows/triage.yml @@ -1,6 +1,6 @@ name: Triaging on: - pull_request_target: + pull_request_target: # zizmor: ignore[dangerous-triggers] issues: types: - labeled diff --git a/doc/.sphinx/_integration/add_config.py b/doc/.sphinx/_integration/add_config.py index c0c7d1d03..0b7d5ab94 100644 --- a/doc/.sphinx/_integration/add_config.py +++ b/doc/.sphinx/_integration/add_config.py @@ -12,6 +12,7 @@ if project == "LXD": html_baseurl = "https://documentation.ubuntu.com/lxd/stable-5.21/" html_js_files.append('rtd-search.js') + html_css_files = globals().get('html_css_files', []) + ['override-header.css'] tags.add('integrated') elif project == "MicroCeph": html_baseurl = "https://canonical-microceph.readthedocs-hosted.com/en/latest/" diff --git a/doc/Makefile b/doc/Makefile index 22d58dd18..2818b750d 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -51,7 +51,7 @@ integrate: cp .sphinx/_integration/microcloud.html integration/microcloud/_templates/header.html cp .sphinx/_integration/header.css integration/microcloud/_static/ cp .sphinx/_integration/lxd.html integration/lxd/doc/_templates/header.html - cp .sphinx/_integration/header.css integration/lxd/doc/_static/ + cp .sphinx/_integration/override-header.css integration/lxd/doc/_static/ cp .sphinx/_integration/microceph.html integration/microceph/docs/.sphinx/_templates/header.html cp .sphinx/_integration/override-header.css integration/microceph/docs/.sphinx/_static/ cp .sphinx/_integration/microovn.html integration/microovn/docs/.sphinx/_templates/header.html diff --git a/doc/custom_conf.py b/doc/custom_conf.py index 77550a128..7a4518901 100644 --- a/doc/custom_conf.py +++ b/doc/custom_conf.py @@ -279,23 +279,29 @@ 'lxd': ('https://documentation.ubuntu.com/lxd/stable-5.21/', None), 'microceph': ('https://canonical-microceph.readthedocs-hosted.com/en/v19.2.0-squid/', None), 
'microovn': ('https://documentation.ubuntu.com/microcloud/v2/microovn/', None), - 'ceph': ('https://docs.ceph.com/en/latest/', None), } elif ('READTHEDOCS' in os.environ) and (os.environ['READTHEDOCS'] == 'True'): intersphinx_mapping = { 'lxd': (os.environ['PATH_PREFIX'] + 'lxd/', os.environ['READTHEDOCS_OUTPUT'] + 'html/lxd/objects.inv'), 'microceph': (os.environ['PATH_PREFIX'] + 'microceph/', os.environ['READTHEDOCS_OUTPUT'] + 'html/microceph/objects.inv'), 'microovn': (os.environ['PATH_PREFIX'] + 'microovn/', os.environ['READTHEDOCS_OUTPUT'] + 'html/microovn/objects.inv'), - 'ceph': ('https://docs.ceph.com/en/latest/', None) } else: intersphinx_mapping = { 'lxd': ('/lxd/', '_build/lxd/objects.inv'), 'microceph': ('/microceph/', '_build/microceph/objects.inv'), 'microovn': ('/microovn/', '_build/microovn/objects.inv'), - 'ceph': ('https://docs.ceph.com/en/latest/', None) } +# Add intersphinx mappings for docs sets not part of the MicroCloud integrated docs here: + +base_intersphinx = { + 'ceph': ('https://docs.ceph.com/en/latest/', None), + 'snap': ('https://snapcraft.io/docs/', None), +} + +intersphinx_mapping.update(base_intersphinx) + # Define a :center: role that can be used to center the content of table cells. rst_prolog = ''' .. role:: center diff --git a/doc/explanation/index.md b/doc/explanation/index.md index 44bec71ac..9e3375913 100644 --- a/doc/explanation/index.md +++ b/doc/explanation/index.md @@ -7,7 +7,7 @@ myst: (explanation)= # Explanation -The explanatory guides in this section introduce you to the concepts used in MicroCloud and help you understand how things fit together. +The explanatory guides in this section help you understand how MicroCloud fits together with LXD, Ceph, and OVN. Use these guides to learn about MicroCloud's approach to clustering, networking, storage, security, and more. 
```{toctree} :maxdepth: 1 diff --git a/doc/explanation/initialization.md b/doc/explanation/initialization.md index 2852700a0..377049322 100644 --- a/doc/explanation/initialization.md +++ b/doc/explanation/initialization.md @@ -10,18 +10,16 @@ See {ref}`howto-initialize` for instructions on how to set up MicroCloud. (trust-establishment-session)= ## Trust establishment session -To allow several instances of MicroCloud joining the final cluster, in both the interactive and non-interactive method each instance -is running one half of the trust establishment session to trust the other side. +To allow several instances of MicroCloud to join the final cluster, in both the interactive and non-interactive method each instance is running one half of the trust establishment session to trust the other side. Each trust establishment session has one initiator and one to many joiners. -In case of the interactive mode the side which runs the `microcloud init` command becomes the initiator. -The other side becomes the joiner by running `microcloud join`. -In the non-interactive mode the initiator is being defined either using the `initiator` or `initiator_address` configuration key. +In interactive mode, the side that runs the `microcloud init` command becomes the initiator, and the other sides become joiners by running `microcloud join`. +In non-interactive mode, the initiator is defined by either the `initiator` or `initiator_address` configuration key. (automatic-server-detection)= ## Automatic server detection -If required MicroCloud uses multicast discovery to automatically detect a so called initiator on the network. +If required, MicroCloud uses multicast discovery to automatically detect a so-called initiator on the network. This method works in physical networks, but it is usually not supported in a cloud environment. Instead you can specify the address of the initiator instead to not require using multicast. 
diff --git a/doc/explanation/microcloud.md b/doc/explanation/microcloud.md index bccef5e99..b9c3371bf 100644 --- a/doc/explanation/microcloud.md +++ b/doc/explanation/microcloud.md @@ -19,16 +19,16 @@ At the end of this, you’ll have an OVN cluster, a Ceph cluster, and a LXD clus MicroCloud sets up a LXD cluster. You can use the {command}`microcloud cluster` command to show information about the cluster members, or to remove specific members. -Apart from that, you can use LXD commands to manage the cluster. In the LXD documentation, see {ref}`lxd:clustering` for how-to guides on cluster management, or {ref}`lxd:exp-clusters` for an explanation of LXD clusters. +Apart from that, you can use LXD commands to manage the cluster. In the LXD documentation, refer to {ref}`lxd:clustering` for how-to guides on cluster management, or {ref}`lxd:exp-clusters` for an explanation of LXD clusters. (exp-microcloud-microovn)= ## MicroOVN networking By default, MicroCloud uses MicroOVN for networking, which is a minimal wrapper around OVN (Open Virtual Network). -- For an overview of MicroCloud networking with OVN, see: {ref}`exp-networking`. -- For networking requirements, see: {ref}`reference-requirements-network`. -- To learn how to use a dedicated underlay network, see: {ref}`howto-ovn-underlay`. +- For an overview of MicroCloud networking with OVN, refer to {ref}`exp-networking`. +- For networking requirements, refer to {ref}`reference-requirements-network`. +- To learn how to use a dedicated underlay network, refer to {ref}`howto-ovn-underlay`. (exp-microcloud-storage)= ## Storage @@ -52,11 +52,11 @@ To use distributed storage, you must have at least three disks (attached to at l You can securely access a browser-based graphical UI for managing your MicroCloud deployment. -For details, see: {ref}`howto-ui`. +For details, refer to {ref}`howto-ui`. 
```{admonition} Other client interfaces :class: tip -You can also manage MicroCloud through the {ref}`command line ` or LXD's {ref}`lxd:rest-api`. +You can also manage MicroCloud through the {ref}`command line ` or LXD's {ref}`lxd:rest-api`. ``` (exp-microcloud-scale)= @@ -66,7 +66,7 @@ MicroCloud is designed to be replicable at scale, enabling you to create consist Once deployed, each MicroCloud component (LXD, MicroCeph, MicroOVN) is designed to scale horizontally, meaning you can add more machines to increase capacity, performance, and redundancy. When new cluster members are added, these components automatically integrate them as control plane, storage, and networking peers without requiring manual reconfiguration. This includes {doc}`automatic failure domain adjustment ` for MicroCeph. -Furthermore, MicroCloud's snap-based updates help keep deployments consistent at scale. By updating cluster members to the latest version available on the LTS snap channel, you can ensure that all machines are using the same version with the latest security updates and bugfixes. See: {ref}`ref-releases-snaps`. +Furthermore, MicroCloud's snap-based updates help keep deployments consistent at scale. By updating cluster members to the latest version available on the LTS snap channel, you can ensure that all machines are using the same version with the latest security updates and bugfixes. Refer to {ref}`ref-releases-snaps` to learn more. (exp-microcloud-ha)= ## High availability @@ -75,9 +75,9 @@ MicroCloud achieves high availability (HA) through its distributed architecture: LXD provides control plane HA by allowing each cluster member to manage the cluster. If one member goes down, another can serve requests in its place. For data plane HA, LXD also provides automatic {ref}`cluster healing `. For more information, refer to the LXD documentation on {ref}`lxd:clusters-high-availability`. 
-Using distributed storage with MicroCeph means that data is replicated across the cluster, so even if one member goes offline, its data remains available on others. Ceph's {doc}`Controlled Replication Under Scalable Hashing (CRUSH) algorithm ` automatically redistributes data when parts of the system fail, maintaining availability. Also see: the {ref}`MicroCloud storage requirements for high availability ` and the {doc}`MicroCeph documentation on its failure domain management `. +Using distributed storage with MicroCeph means that data is replicated across the cluster, so even if one member goes offline, its data remains available on others. Ceph's {doc}`Controlled Replication Under Scalable Hashing (CRUSH) algorithm ` automatically redistributes data when parts of the system fail, maintaining availability. For more information, refer to the {ref}`MicroCloud storage requirements for high availability ` and the {doc}`MicroCeph documentation on its failure domain management `. -MicroOVN brings a distributed overlay network, meaning that switching and routing functions are not centralized on any single cluster member. Each member hosts its own virtual switch, avoiding a single point of failure for internal, intra-cluster traffic: every member can continue forwarding packets even if others are offline. External connectivity relies on a virtual router that is active on one member at a time; if that member fails, another takes over to keep uplink connectivity available. For more information, see: {ref}`exp-networking-ovn`. +MicroOVN brings a distributed overlay network, meaning that switching and routing functions are not centralized on any single cluster member. Each member hosts its own virtual switch, avoiding a single point of failure for internal, intra-cluster traffic: every member can continue forwarding packets even if others are offline. 
External connectivity relies on a virtual router that is active on one member at a time; if that member fails, another takes over to keep uplink connectivity available. For more information, refer to {ref}`exp-networking-ovn`. (exp-microcloud-access-control)= ## Fine-grained access control and multi-tenancy diff --git a/doc/explanation/security.md b/doc/explanation/security.md index 9e1b3c8df..64a638ff1 100644 --- a/doc/explanation/security.md +++ b/doc/explanation/security.md @@ -17,9 +17,9 @@ MicroCloud runs on Ubuntu and benefits from all [Ubuntu platform security measur (exp-security-snaps)= ## Snaps -MicroCloud and its components are distributed as [snaps](https://snapcraft.io/docs), which enhances security through providing a confined environment with a streamlined update mechanism. Both LTS and feature channels receive regular security updates through Canonical’s official infrastructure. +MicroCloud and its components are distributed as [snaps](https://snapcraft.io/docs), which enhances security by providing a confined environment with a streamlined update mechanism. Both LTS and feature channels receive regular security updates through Canonical’s official infrastructure. -All snaps are digitally signed using [assertions](https://snapcraft.io/docs/assertions) to guarantee authenticity and integrity. +All snaps are digitally signed using {ref}`assertions ` to guarantee authenticity and integrity. (exp-security-reporting)= ## Security reporting and disclosure diff --git a/doc/how-to/index.md b/doc/how-to/index.md index 8f050f8a7..441a469dc 100644 --- a/doc/how-to/index.md +++ b/doc/how-to/index.md @@ -7,7 +7,13 @@ myst: (howto)= # How-to guides -These MicroCloud how-to guides cover key operations and processes, including installation and configuration instructions, how to add and remove cluster members, update and upgrade procedures, and more. +These MicroCloud how-to guides cover key operations and processes. 
+ +## Initial setup + +Follow these guides to install MicroCloud in a testing or production environment +and initialize MicroCloud through interactive or automated configuration +processes. You can also set up access to the MicroCloud UI. ```{toctree} :maxdepth: 1 @@ -15,15 +21,53 @@ These MicroCloud how-to guides cover key operations and processes, including ins Install MicroCloud Initialize MicroCloud Access the UI -Automate a test deployment with Terraform +``` + +## Configure services + +You can configure storage with MicroCeph and networking with MicroOVN during the +initialization process, or you can add a service later. + +```{toctree} +:maxdepth: 1 + Configure Ceph networking Configure OVN underlay -Work with MicroCloud -Manage cluster members +Add a service +``` + +## Manage clusters and cluster members + +As your needs change, follow these steps to manage your clusters and cluster +members and keep your deployment up to date. + +```{toctree} +:maxdepth: 1 + +Manage cluster members +Recover MicroCloud Update and upgrade Manage the snaps -Recover MicroCloud -Add a service +``` + +## Automated deployment with Terraform + +Follow this guide to automate the deployment of MicroCloud with Terraform. + +```{toctree} +:maxdepth: 1 + +Deploy a MicroCloud test environment with Terraform +``` + +## Engage with us + +Find out how to get community and commercial support, and learn how to +contribute to the MicroCloud project. 
+ +```{toctree} +:maxdepth: 1 + Get support Contribute to MicroCloud ``` diff --git a/doc/how-to/install.md b/doc/how-to/install.md index 466412711..6a3f2fa77 100644 --- a/doc/how-to/install.md +++ b/doc/how-to/install.md @@ -19,7 +19,7 @@ A physical or virtual machine intended for use as a MicroCloud cluster member mu - Networking: - Fixed IP addresses (DHCP not supported) - At least two network interfaces per cluster member: one for intra-cluster communication and one for external connectivity to the uplink network - - Partially or fully disaggregated networking setups require more interfaces; see: {ref}`howto-ceph-networking` + - Partially or fully disaggregated networking setups require more interfaces; refer to {ref}`howto-ceph-networking` to learn more - To use a {ref}`dedicated underlay network for OVN traffic `, an additional interface per cluster member is required - Uplink network must support both broadcast and multicast - Intra-cluster interface must have IPs assigned; external connectivity interface (to uplink) must not have any IPs assigned @@ -47,7 +47,7 @@ These requirements are in addition to those listed in the General tab. - Memory: - Minimum 8 GiB RAM per cluster member - Networking: - - It is possible to use a single network interface per cluster member. However, such a configuration is neither supported nor recommended. For details, see: {ref}`reference-requirements-network-interface-single`. + - It is possible to use a single network interface per cluster member. However, such a configuration is neither supported nor recommended. For details, refer to {ref}`reference-requirements-network-interface-single`. - Storage: - If high availability is required, use distributed storage with: - a minimum of 3 cluster members @@ -79,7 +79,7 @@ These requirements are in addition to those listed in the General tab. ```` ````` -For detailed information, see: {ref}`reference-requirements`. +For detailed information, refer to {ref}`reference-requirements`. 
## Installation @@ -138,7 +138,7 @@ A channel includes both a {ref}`track ` (such as `2 MicroCloud's component snaps must use tracks that correspond to the same MicroCloud release within the {ref}`matrix of compatible versions `. -For production deployments, use the `stable` risk level for all snaps. For testing or development, you might use a different risk level for some snaps. See {ref}`ref-snaps-microcloud-risk` for more information. +For production deployments, use the `stable` risk level for all snaps. For testing or development, you might use a different risk level for some snaps. Refer to {ref}`ref-snaps-microcloud-risk` for more information. To specify a different channel, use the `--channel` flag at installation: @@ -154,9 +154,9 @@ sudo snap install lxd --cohort="+" --channel=5.21/edge Even if the risk level for a snap differs from the other snaps, the same channel must be used for that snap on all cluster members. For example, if you use the `5.21/edge` channel for the LXD snap, then _all_ cluster members must use that channel for the LXD snap. -For details about the MicroCloud snap channels, see: {ref}`ref-snaps-microcloud-channels`. +For details about the MicroCloud snap channels, refer to {ref}`ref-snaps-microcloud-channels`. (howto-install-hold-updates)= ## Hold updates -When a new release is published to a snap channel, installed snaps following that channel update automatically by default. This is undesired behavior for MicroCloud and its components, and you should override this default behavior by holding updates. See: {ref}`howto-update-hold`. +When a new release is published to a snap channel, installed snaps following that channel update automatically by default. This is undesired behavior for MicroCloud and its components, and you should override this default behavior by holding updates. Refer to {ref}`howto-update-hold` for details. 
diff --git a/doc/how-to/members_manage.md b/doc/how-to/members_manage.md index 27b9c50fa..500d9d7e4 100644 --- a/doc/how-to/members_manage.md +++ b/doc/how-to/members_manage.md @@ -1,11 +1,11 @@ (howto-members-manage)= # How to manage MicroCloud cluster members -The how-to guides in this section provide instructions for managing MicroCloud cluster members, which can be physical or virtual machines. These include how to add, remove, and shut down cluster members. +Follow these guides to add cluster members interactively or with a preseed configuration file, and to safely remove or shut down cluster members. ```{toctree} :maxdepth: 1 Add a cluster member Remove a cluster member Shut down a cluster member -``` \ No newline at end of file +``` diff --git a/doc/how-to/recover.md b/doc/how-to/recover.md index ecc3ece2d..b8ec1f1e1 100644 --- a/doc/how-to/recover.md +++ b/doc/how-to/recover.md @@ -18,11 +18,12 @@ accessible in order to perform database operations. If a cluster has less than a quorum of voters up and accessible, then database operations will no longer be possible on the entire cluster. -If the loss of quorum is temporary (e.g. some members temporarily lose power), -database operations will be restored when the offline members come back online. +If the loss of quorum is temporary (for example, some members temporarily lose +power), database operations will be restored when the offline members come back +online. This document describes how to recover database access if the offline members -have been lost without the possibility of recovery (e.g. disk failure). +have been lost without the possibility of recovery (for example, disk failure). ## Recovery procedure diff --git a/doc/how-to/snaps.md b/doc/how-to/snaps.md index ed611f1bf..bb50cc55f 100644 --- a/doc/how-to/snaps.md +++ b/doc/how-to/snaps.md @@ -3,7 +3,7 @@ Manage MicroCloud and its components (LXD, MicroCeph, and MicroOVN) through their snap packages. 
-For the installation guide, see: {ref}`howto-install`. For details about the snaps, including {ref}`supported and compatible releases `, {ref}`tracks `, and {ref}`release processes `, see: {ref}`ref-releases-snaps`. +For the installation guide, refer to {ref}`howto-install`. For details about the snaps, including {ref}`supported and compatible releases `, {ref}`tracks `, and {ref}`release processes `, refer to {ref}`ref-releases-snaps`. (howto-snap-info)= ## View snap information @@ -36,7 +36,7 @@ The first part of the version string corresponds to the release (in this sample, (howto-snap-daemon)= ## Manage the MicroCloud daemon -Installing the MicroCloud snap creates the MicroCloud daemon as a [snap service](https://snapcraft.io/docs/how-to-guides/manage-snaps/control-services/). Use the following `snap` commands to manage this daemon. +Installing the MicroCloud snap creates the MicroCloud daemon as a snap service. Use the following `snap` commands to manage this daemon. To view the status of the daemon, run: @@ -62,7 +62,7 @@ To restart the daemon, run: sudo snap restart microcloud ``` -For more information about managing snap services, visit [Control services](https://snapcraft.io/docs/how-to-guides/manage-snaps/control-services/) in the Snap documentation. +For more information about managing snap services, visit {ref}`snap:how-to-guides-manage-snaps-control-services` in the Snap documentation. ## Related topics diff --git a/doc/how-to/support.md b/doc/how-to/support.md index f9071c9ec..e9423e2a5 100644 --- a/doc/how-to/support.md +++ b/doc/how-to/support.md @@ -1,7 +1,7 @@ (howto-support)= # How to get support -For information about supported and compatible releases of MicroCloud and its components, see: {ref}`ref-releases-matrix`. +Refer to {ref}`ref-releases-matrix` for the matrix of compatible releases across MicroCloud and its components. 
## Community support @@ -27,5 +27,5 @@ You can find additional resources on the [MicroCloud website](https://canonical. LTS releases of MicroCloud receive standard support for five years, which means they receive continuous updates. Commercial support for MicroCloud is provided as part of [Ubuntu Pro](https://ubuntu.com/pro) (both Infra-only and full Ubuntu Pro). See the [full service description](https://ubuntu.com/legal/ubuntu-pro-description) for details. -Managed solutions and firefighting support are also available for MicroCloud deployments. See: [Managed services](https://ubuntu.com/managed). +Managed solutions and firefighting support are also available for MicroCloud deployments. Visit [Managed services](https://ubuntu.com/managed) for details. diff --git a/doc/how-to/update_upgrade.md b/doc/how-to/update_upgrade.md index 6dc12e2f2..2138865b5 100644 --- a/doc/how-to/update_upgrade.md +++ b/doc/how-to/update_upgrade.md @@ -1,7 +1,7 @@ (howto-update-upgrade)= # How to update and upgrade -The snaps for MicroCloud, LXD, MicroCeph, and MicroOVN installed on MicroCloud cluster members must always run the same version of each snap. Thus, when the snaps on any cluster member are refreshed, they must also be refreshed on all other cluster members. See {ref}`howto-update-upgrade-order` for the recommended order. +The snaps for MicroCloud, LXD, MicroCeph, and MicroOVN installed on MicroCloud cluster members must always run the same version of each snap. Thus, when the snaps on any cluster member are refreshed, they must also be refreshed on all other cluster members. Refer to {ref}`howto-update-upgrade-order` for the recommended order. If the cluster members' snaps are not synchronized, MicroCloud continues to function as normal in regards to its data plane. However, the configuration of its control plane cannot be altered. For example, you cannot add or remove cluster members or instances. 
To prevent automatic updates from causing snaps to run different versions, make sure to always {ref}`hold updates ` as well as {ref}`synchronize updates using the cohort flag `. @@ -10,12 +10,12 @@ Performing an update or upgrade requires going through the list of snaps one aft (howto-update-upgrade-backup)= ## Back up data Before performing an update or upgrade, make sure to back up your data to prevent any data loss in case of failure. -See the following backup guides for each of the snaps: +Consult the following backup guides for each of the snaps: -* {doc}`How to backup MicroCeph ` -* {ref}`How to backup LXD ` +* {doc}`How to back up MicroCeph ` +* {ref}`How to back up LXD ` -In case of error, see {ref}`howto-recover` for troubleshooting details. +In case of error, refer to {ref}`howto-recover` for troubleshooting details. (howto-update-upgrade-running-instances)= ## Keep instances running during an update or upgrade @@ -27,12 +27,12 @@ For the LXD snap, this won't affect the running instances. However, for the Micr To avoid such possible effects entirely, use the live migration approach described below when updating or upgrading the MicroCeph and MicroOVN snaps. - Use virtual machines (VMs) instead of system containers for crucial workloads; in general, containers cannot be live-migrated. -- Each VM must be pre-configured for live migration. See: {ref}`lxd:live-migration` for information on the required configurations. +- Each VM must be pre-configured for live migration. Refer to {ref}`lxd:live-migration` for information on the required configurations. - Before you update or upgrade a cluster member, use the {ref}`cluster evacuate ` operation to migrate all instances on the host to other members in the same cluster. - Once the update or upgrade is complete, use the {ref}`cluster restore ` operation to migrate all evacuated instances back to the original host. - The evacuate and restore operations can live-migrate any VMs that are configured to allow it. 
If any instances on the cluster member are ineligible for live migration (such as a container, or a VM that is not configured for live migration), then during both evacuation and restoration, those instances are stopped, migrated, and restarted. -For more information on the cluster evacuate and restore operations, see: {ref}`lxd:cluster-evacuate-restore`. +For more information on the cluster evacuate and restore operations, refer to {ref}`lxd:cluster-evacuate-restore`. (howto-update-upgrade-order)= ## Update and upgrade order @@ -49,7 +49,7 @@ Update the same component's snap on all cluster members before moving to the nex (howto-update-sync)= ## Synchronize updates using the cohort flag -Even with manual snap updates, versions can fall out of sync; see {ref}`ref-snaps-updates` for details. +Even with manual snap updates, versions can fall out of sync; refer to {ref}`ref-snaps-updates` for details. To ensure synchronized updates, the `--cohort="+"` flag must be set on all cluster members. You only need to set this flag once per snap on each cluster member, either during {ref}`installation `, or the first time you {ref}`perform a manual update `. @@ -112,7 +112,7 @@ sudo snap refresh --hold lxd microceph microovn microcloud Then you can perform {ref}`manual updates ` on a schedule that you control. -For detailed information about holds, see: [Pause or stop automatic updates](https://snapcraft.io/docs/managing-updates#p-32248-pause-or-stop-automatic-updates) in the Snap documentation. +The {ref}`snap:how-to-guides-work-with-snaps-manage-updates` page in the Snap documentation provides details about how to pause or stop automatic updates. (howto-update)= ## Update MicroCloud @@ -120,7 +120,7 @@ For detailed information about holds, see: [Pause or stop automatic updates](htt ```{admonition} Users of the 1 track :class: important The `1` MicroCloud track reached {abbr}`EOL (End of Life)` at the end of April 2025. 
-If you use this track, make sure to upgrade to the `2` LTS track, as no further updates will be released to the `1` track. See the {ref}`howto-upgrade` guide below for more information. Specific command syntax is provided in {ref}`howto-upgrade-microcloud-full-example`. +If you use this track, make sure to upgrade to the `2` LTS track, as no further updates will be released to the `1` track. Refer to the {ref}`howto-upgrade` guide below for more information. Specific command syntax is provided in {ref}`howto-upgrade-microcloud-full-example`. ``` Updating MicroCloud allows access to the latest set of features and fixes in the tracked channels for the various snaps. During an update, snaps are refreshed to the most up-to-date version for their tracked channel. This does not introduce breaking changes. @@ -178,7 +178,7 @@ sudo microcloud status ```{note} The status command was introduced in MicroCloud version 2. -See {ref}`howto-upgrade` on how to upgrade to another track. +Refer to {ref}`howto-upgrade` on how to upgrade to another track. ``` (howto-upgrade)= @@ -188,7 +188,7 @@ Upgrading MicroCloud means to switch to a newer track with major improvements an During an upgrade, the snaps channel will be switched to another track. This might introduce breaking changes for MicroCloud and its components and should be done with care. -See {ref}`howto-update` for regular non-breaking updates. +Refer to {ref}`howto-update` for regular non-breaking updates. Before you update, ensure that all snaps are {ref}`synchronized using the cohort flag `. The `--cohort` flag is only necessary if the snap is not `in-cohort`. @@ -249,7 +249,7 @@ sudo microcloud status Use the commands below to upgrade all components from MicroCloud 1 to MicroCloud 2. -Omit the `--cohort="+"` flag if the MicroCeph snap is already `in-cohort`. If unsure, see: {ref}`howto-update-sync`. +Omit the `--cohort="+"` flag if the MicroCeph snap is already `in-cohort`. If unsure, refer to {ref}`howto-update-sync`. 
First, upgrade MicroCeph on all cluster members, one by one: @@ -288,10 +288,10 @@ If you manage a large MicroCloud deployment and you need absolute control over w The Enterprise Store Proxy is a separate application that sits between the snap client command on your machines and the snap store. You can configure the Enterprise Store Proxy to make only specific snap revisions available for installation. -See the [Enterprise Store Proxy documentation](https://documentation.ubuntu.com/enterprise-store/) for information about how to install and register the Enterprise Store Proxy. +Visit the [Enterprise Store Proxy documentation](https://ubuntu.com/enterprise-store/docs/) for information about how to install and register the Enterprise Store Proxy. After setting it up, configure the snap clients on all cluster members to use the proxy. -See [Configuring devices](https://documentation.ubuntu.com/enterprise-store/main/how-to/devices/) for instructions. +Refer to [Configuring devices](https://ubuntu.com/enterprise-store/docs/how-to/devices/) for instructions. You can then configure the Enterprise Store Proxy to override the revisions for the snaps that are needed for MicroCloud: @@ -312,4 +312,4 @@ How-to guides: Reference: -- {ref}`ref-releases-snaps` \ No newline at end of file +- {ref}`ref-releases-snaps` diff --git a/doc/index.md b/doc/index.md index 30c75ff50..82190df4a 100644 --- a/doc/index.md +++ b/doc/index.md @@ -13,7 +13,7 @@ Deploy a low-touch, open source cloud platform in minutes with MicroCloud. MicroCloud creates a lightweight cluster of machines that operates as a scalable private cloud. It combines LXD for virtualization, MicroCeph for distributed storage, and MicroOVN for networking—all automatically configured by the [MicroCloud snap](https://snapcraft.io/microcloud) for {ref}`reproducible, scalable deployments `. 
-With MicroCloud, you can eliminate the complexity of manual setup and quickly benefit from {ref}`high availability `, {ref}`streamlined security updates `, and {ref}`fine-grained access control for multi-tenancy `. Cluster members can run {ref}`full virtual machines or lightweight system containers ` with bare-metal performance. Manage it through your choice of client interfaces, including a {ref}`graphical UI ` and {ref}`CLI `. +With MicroCloud, you can eliminate the complexity of manual setup and quickly benefit from {ref}`high availability `, {ref}`streamlined security updates `, and {ref}`fine-grained access control for multi-tenancy `. Cluster members can run {ref}`full virtual machines or lightweight system containers ` with bare-metal performance. Manage it through your choice of client interfaces, including a {ref}`graphical UI ` and {ref}`CLI `. MicroCloud is designed for small-scale private clouds and hybrid cloud extensions. Its efficiency and simplicity also make it an excellent choice for edge computing, test labs, and other resource-constrained use cases. 
@@ -25,36 +25,15 @@ MicroCloud is designed for small-scale private clouds and hybrid cloud extension --- -## In the MicroCloud documentation +## In this documentation -````{grid} 1 1 2 2 - -```{grid-item} [Tutorials](/tutorial/index) - -**Start here**: hands-on {ref}`introductions to MicroCloud ` for new users -``` - -```{grid-item} [How-to guides](/how-to/index) - -**Step-by-step guides** covering key operations and common tasks such as {ref}`installing MicroCloud `, {ref}`adding ` and {ref}`removing ` cluster members, and {ref}`accessing the UI ` -``` - -```` - -````{grid} 1 1 2 2 -:reverse: - -```{grid-item} [Reference](/reference/index) - -**Technical information** - Detailed [requirements](/reference/requirements) -``` - -```{grid-item} [Explanation](/explanation/index) - -**Discussion and clarification** of key topics such as {ref}`networking ` and the [initialization process](/explanation/initialization/) -``` - -```` +| | | +|---|---| +| Start here | {ref}`Tutorial using multiple virtualized cluster members ` • {ref}`Tutorial using a single physical cluster member ` • {ref}`MicroCloud overview ` | +| Storage and networks | {ref}`Understand local vs. 
distributed storage ` • {ref}`Understand MicroCloud's networking approach ` • {ref}`Configure an OVN underlay network ` • {ref}`Configure a dedicated Ceph network ` • {ref}`Add a service ` | +| Cluster management | {ref}`Add `, {ref}`remove `, and {ref}`shut down ` cluster members • {ref}`Access the web UI ` • {ref}`Common CLI commands reference ` | +| Setup and maintenance | {ref}`Installation ` • {ref}`Initialization ` • {ref}`Automate initialization with Terraform ` • {ref}`Update and upgrade ` • {ref}`Recover a cluster ` • {ref}`security` | +| Releases and requirements | {ref}`Supported and compatible releases ` • {ref}`Snaps and releases reference ` • {ref}`ref-release-notes` • {ref}`Setup requirements ` | --- @@ -74,12 +53,27 @@ Also, while each component's documentation includes instructions for removing cl ## Project and community -MicroCloud is a member of the Ubuntu family. It’s an open source project that warmly welcomes community projects, contributions, suggestions, fixes and constructive feedback. +MicroCloud is a member of the [Canonical](https://canonical.com) family. It’s an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback. + +### Get involved + +- {ref}`Support ` +- [Discussion forum](https://discourse.ubuntu.com/c/lxd/microcloud/145) +- {ref}`Contribute ` + +### Releases + +- {ref}`ref-release-notes` + +### Governance and policies + +- [Code of conduct](https://ubuntu.com/community/docs/ethos/code-of-conduct) + +### Commercial support + +Thinking about using MicroCloud for your next project? [Get in touch](https://canonical.com/microcloud/contact-us)! + -- [MicroCloud snap](https://snapcraft.io/microcloud) -- [Contribute](https://github.com/canonical/microcloud) -- [Get support](https://discourse.ubuntu.com/c/lxd/microcloud/145) -- [Thinking about using MicroCloud for your next project? 
Get in touch!](https://canonical.com/microcloud) ```{toctree} diff --git a/doc/redirects.py b/doc/redirects.py index ed2234379..dbd0f8254 100644 --- a/doc/redirects.py +++ b/doc/redirects.py @@ -5,4 +5,5 @@ 'how-to/remove_machine': '../member_remove', 'how-to/add_machine': '../member_add', 'how-to/shutdown_machine': '../member_shutdown', + 'how-to/commands': '../../reference/commands', } diff --git a/doc/how-to/commands.md b/doc/reference/commands.md similarity index 99% rename from doc/how-to/commands.md rename to doc/reference/commands.md index e96882197..e74ac3f20 100644 --- a/doc/how-to/commands.md +++ b/doc/reference/commands.md @@ -4,8 +4,8 @@ myst: description: MicroCloud CLI command reference, a cheat sheet of essential MicroCloud/LXD commands for managing instances, storage, networks, and cluster operations. --- -(howto-commands)= -# How to work with MicroCloud via the CLI (command cheat sheet) +(ref-commands)= +# Common CLI commands This guide lists CLI commands for common operations in MicroCloud. This command list is not meant to be exhaustive, but it gives a general overview and serves as an entry point to working with MicroCloud through the CLI. diff --git a/doc/reference/index.md b/doc/reference/index.md index ea7d42b5a..c6bda04a3 100644 --- a/doc/reference/index.md +++ b/doc/reference/index.md @@ -1,10 +1,27 @@ (reference)= # Reference -The reference material in this section provides technical information about MicroCloud. +This section provides technical reference guides for MicroCloud. + +## Requirements and releases + +Find out about requirements for a MicroCloud deployment, as well as information +about its release cycles, release types, and snaps. ```{toctree} :maxdepth: 1 MicroCloud requirements /reference/releases-snaps +/reference/release-notes/index +``` + +## Commands + +Use this command reference to perform common MicroCloud operations through the +CLI. 
+ +```{toctree} +:maxdepth: 1 +/reference/commands +``` diff --git a/doc/reference/release-notes/index.md b/doc/reference/release-notes/index.md new file mode 100644 index 000000000..3debccf8d --- /dev/null +++ b/doc/reference/release-notes/index.md @@ -0,0 +1,43 @@ +--- +myst: + html_meta: + description: This page lists recent release notes for MicroCloud, which highlight new features, bug fixes, and other important information for each release. +--- + +(ref-release-notes)= +# Release notes + +This page lists recent release notes for MicroCloud, which highlight new features, bug fixes, and other important information for each release. + +## Release policy and schedule + +For details about the release policy and schedule, along with information about the MicroCloud snaps and channels, refer to {ref}`ref-releases-snaps`. + +## Upgrade instructions + +For full instructions on updating or upgrading MicroCloud, refer to {ref}`howto-update-upgrade`: + +- Feature releases are published on the {ref}`current feature track `. If you are already following this track and you want to manually update to the most recent feature release, consult the {ref}`howto-update` section. +- To move from one {ref}`LTS track ` to a higher LTS track or the feature track, consult the {ref}`howto-upgrade` section. + +(ref-release-notes-releases)= +## Releases + +```{toctree} +:titlesonly: +MicroCloud 3.2 +MicroCloud 3.1 +MicroCloud 2.1.2 LTS +MicroCloud 2.1.1 LTS +MicroCloud 2.1.0 LTS +``` + +(ref-release-notes-components)= +## Release notes for MicroCloud components + +- {ref}`LXD release notes ` +- {doc}`MicroCeph release notes ` + +MicroOVN does not yet publish release notes. + +Refer to {ref}`ref-releases-matrix` for the matrix of compatible releases across MicroCloud and its components. 
diff --git a/doc/reference/releases-snaps.md b/doc/reference/releases-snaps.md index 548f14986..7816f925f 100644 --- a/doc/reference/releases-snaps.md +++ b/doc/reference/releases-snaps.md @@ -19,7 +19,7 @@ The releases above are currently under standard support, meaning they receive bu The snaps for MicroCloud, LXD, MicroCeph, and MicroOVN must be installed on all members of the same MicroCloud cluster. The versions installed by each snap must be compatible with one another, and the same version of each snap must be installed on all cluster members. -Also see: {ref}`howto-update-upgrade` and {ref}`howto-snap`. +Refer to {ref}`howto-update-upgrade` and {ref}`howto-snap` for additional details. (ref-releases-microcloud)= ## MicroCloud releases @@ -36,7 +36,7 @@ MicroCloud follows the [Ubuntu release cycle](https://ubuntu.com/about/release-c (ref-releases-microcloud-lts-support)= #### Support -LTS releases receive standard support for five years, meaning that it receives continuous updates according to the support levels described below. An [Ubuntu Pro](https://ubuntu.com/pro) subscription can provide additional support and extends the support duration by an additional five years. +LTS releases receive standard support for five years, meaning that they receive continuous updates according to the support levels described below. An [Ubuntu Pro](https://ubuntu.com/pro) subscription can provide additional support and extend the support duration by an additional five years. Standard support for an LTS release starts at full support for its first two years, then moves to maintenance support for the remaining three years. Once an LTS reaches End of Life (EOL), it no longer receives any updates. @@ -59,17 +59,15 @@ Each feature release replaces the one before it, up to the next LTS release. Aft Feature releases receive continuous updates via each new release. 
The newest release at any given time is also eligible for additional support through an [Ubuntu Pro](https://ubuntu.com/pro) subscription. -Currently, since no feature release has been published since the last LTS, there is no supported feature release. - (ref-snaps-microcloud)= ## MicroCloud snap -MicroCloud is distributed as a [snap](https://snapcraft.io/docs). A key benefit of snap packaging is that it includes all required dependencies. This allows packages to run in a consistent environment on many different Linux distributions. Using the snap also streamlines updates through its [channels](https://snapcraft.io/docs/channels). +MicroCloud is distributed as a [snap](https://snapcraft.io/docs). A key benefit of snap packaging is that it includes all required dependencies. This allows packages to run in a consistent environment on many different Linux distributions. Using the snap also streamlines updates through its channels. (ref-snaps-microcloud-channels)= ### Channels -Each installed snap follows a [channel](https://snapcraft.io/docs/channels). Channels include a {ref}`track ` and a {ref}`risk level ` (for example, the {{current_feature_track}}/stable channel). Each channel points to one release at a time, and when a new release is published to a channel, it replaces the previous one. {ref}`Updating the snap ` then updates to that release. +Each installed snap follows a channel. Channels include a {ref}`track ` and a {ref}`risk level ` (for example, the {{current_feature_track}}/stable channel). Each channel points to one release at a time, and when a new release is published to a channel, it replaces the previous one. {ref}`Updating the snap ` then updates to that release. To view all available channels for MicroCloud, run: @@ -77,10 +75,12 @@ To view all available channels for MicroCloud, run: snap info microcloud ``` +For more information about channels, refer to {ref}`snap:explanation-how-snaps-work-channels-and-tracks` in the Snap documentation. 
+ (ref-snaps-microcloud-tracks)= ### Tracks -MicroCloud releases are grouped under [snap tracks](https://snapcraft.io/docs/channels#heading--tracks). +MicroCloud releases are grouped under snap tracks. The current feature track is {{current_feature_track}}, and the currently supported LTS track is {{current_lts_track}}. The `1` track reached {abbr}`EOL (End of Life)` at the end of April 2025. @@ -104,17 +104,19 @@ If you {ref}`install the MicroCloud snap ` without spec (ref-snaps-microcloud-risk)= ### Risk levels -For each MicroCloud track, there are three [risk levels](https://snapcraft.io/docs/channels#heading--risk-levels): `stable`, `candidate`, and `edge`. +For each MicroCloud track, there are three risk levels: `stable`, `candidate`, and `edge`. We recommend that you use the `stable` risk level to install fully tested releases; this is the only risk level supported under [Ubuntu Pro](https://ubuntu.com/pro), as well as the default risk level if one is not specified at install. The `candidate` and `edge` levels offer newer but less-tested updates, posing higher risk. +For more information about risk levels, refer to {ref}`snap:explanation-how-snaps-work-channels-and-tracks` in the Snap documentation. + (ref-releases-snaps-components)= ## For MicroCloud components (ref-releases-snaps-lxd)= ### LXD -LXD follows a similar approach to its releases and snap as MicroCloud, including an LTS release every two years and more frequent feature releases on a feature track. For details, see {ref}`LXD releases and snap `. +LXD follows a similar approach to its releases and snap as MicroCloud, including an LTS release every two years and more frequent feature releases on a feature track. For details, refer to {ref}`LXD releases and snap `. 
(ref-releases-snaps-microceph)= ### MicroCeph @@ -125,7 +127,7 @@ The version of Ceph initially included in the release of an LTS version of Ubunt MicroCeph typically does not publish feature releases, but provides periodic non-breaking updates to existing releases, along with a new stable release corresponding to each Ceph release series. These MicroCeph releases share their upstream's release names (such as `quincy` or `squid`). -For details about MicroCeph, see {doc}`microceph:index`. For more information about the Ceph release cycle, visit the Ceph documentation: {ref}`ceph:ceph-releases-general`. +For details about MicroCeph, refer to the {doc}`MicroCeph documentation `. For more information about the Ceph release cycle, visit the {ref}`Ceph documentation `. (ref-releases-snaps-microovn)= ### MicroOVN releases and snap @@ -134,7 +136,7 @@ The upstream [OVN](https://www.ovn.org/) project follows a six-month release cad Every two years, the March version of OVN becomes an LTS version, such as the `24.03` version released in March of 2024. MicroOVN publishes versions that correspond to these upstream LTS versions. Stable maintenance is provided through upstream point releases for OVN and bugfixes for the snap deployment. -For more information, see the MicroOVN documentation: +For more information, refer to the MicroOVN documentation: - {doc}`microovn:developers/release-process` - {ref}`microovn:snap channels` @@ -146,7 +148,7 @@ By default, installed snaps update automatically when new releases are published With MicroCloud, this can be problematic because its component snaps must always use {ref}`compatible versions `, and because all members of a cluster must use the same version of each snap. -To prevent issues, {ref}`hold updates for MicroCloud and its components `. Furthermore, ensure that the all snaps are set to `in-cohort` (see {ref}`howto-update-sync`). +To prevent issues, {ref}`hold updates for MicroCloud and its components `. 
Furthermore, ensure that all snaps are set to `in-cohort` (refer to {ref}`howto-update-sync` for details). ## Related topics diff --git a/doc/tutorial/index.md b/doc/tutorial/index.md index 63a772e6a..e29747e2e 100644 --- a/doc/tutorial/index.md +++ b/doc/tutorial/index.md @@ -7,25 +7,67 @@ myst: (get-started)= (tutorials)= (tutorial)= -# Get started with MicroCloud +# Tutorials -MicroCloud is quick to set up. Once initialized, you can start using MicroCloud in the same way as a regular LXD cluster. The tutorials in this section provide you with an introduction to MicroCloud concepts and use, including its installation and initialization, and use of its graphical UI. +These tutorials provide you with an introduction to MicroCloud concepts and +usage. -A production MicroCloud should use at least three physical machines as cluster members. However, when first learning about MicroCloud, it's common to only have a single machine available. Thus, we offer two approaches for learning about MicroCloud that both use one physical machine: +A production MicroCloud should use at least three physical machines as cluster +members. However, you may only have a single physical machine available when +first learning about MicroCloud. Our tutorials only require a single physical +machine to complete, but keep in mind that these tutorials are intended for +learning purposes only. -{ref}`Set up a single-member MicroCloud with a physical machine ` -: This tutorial helps you understand the MicroCloud production environment. While certain cluster features are not available in a single-member setup (such as communication between cluster members), you will learn about other important MicroCloud concepts. After you complete this tutorial, you can join other physical machines to the cluster to create a multi-member setup. 
+## Get started with MicroCloud - This approach requires a higher level of Linux system administration knowledge on your part, such as how to configure your network interfaces. It also requires at least two additional physical storage disks and two network interfaces. +- {ref}`Set up a multi-member MicroCloud with virtual machines ` {ref}`Set up a multi-member MicroCloud with virtual machines ` -: This tutorial uses a sandbox approach. It shows you how to create multiple LXD virtual machines (VMs) on a single physical host machine and use those VMs as MicroCloud cluster members. +In this tutorial, you will create multiple LXD virtual machines (VMs) on a +single physical host machine and use those VMs as cluster members. - Since this approach guides you in building and configuring a virtual environment step by step, you do not need system administration knowledge. You'll be able to create a multi-member cluster without requiring multiple physical machines, disks, and network interfaces. However, keep in mind that this approach is intended for learning purposes only. In production environments, only physical machines should be used for MicroCloud cluster members. +Follow this tutorial to learn about MicroCloud concepts, cluster initialization, +and the MicroCloud UI. + +- Hardware requirements: A single machine that supports LXD +- Prerequisites: Basic understanding of a CLI + +```{admonition} In production environments +:class: note +The use of virtual machines in this tutorial is intended for learning +purposes only. In production environments, only physical machines should be +used as MicroCloud cluster members. +``` + +## Advanced tutorial + +- {ref}`Set up a single-member MicroCloud with a physical machine + ` + +In this tutorial, you will install and initialize MicroCloud on a single +physical machine and access the MicroCloud UI. + +Follow this tutorial to learn more about the MicroCloud production environment. 
+Certain cluster features are not available in a single-member setup, but you +will still learn other important MicroCloud concepts. After you complete this +tutorial, you can join other physical machines to the cluster to create a +multi-member setup. + +- Hardware requirements: A single machine with two additional physical storage + disks and two network interfaces +- Prerequisites: Knowledge of Linux system administration, including the + configuration of network interfaces + +```{admonition} In production environments + :class: note + The use of a single physical machine in this tutorial is intended for + learning purposes only. In production environments, at least three physical + machines should be used as cluster members. +``` ```{toctree} :hidden: :maxdepth: 2 -Single physical machine as a cluster member -Multiple virtual machines as cluster members +Get started with MicroCloud +Advanced tutorial +``` diff --git a/doc/tutorial/multi-member.md b/doc/tutorial/multi-member.md index 6744b513e..01f13657b 100644 --- a/doc/tutorial/multi-member.md +++ b/doc/tutorial/multi-member.md @@ -4,7 +4,7 @@ This tutorial guides you through installing and initializing MicroCloud in a confined environment, including storage and networking. You'll then start some instances to see what you can do with MicroCloud. This tutorial uses LXD virtual machines (VMs) for the MicroCloud cluster members, so you don't need any extra hardware to follow it. ```{tip} - Only use physical machines in a production environment. Use VMs as cluster members only in testing or development environments, such as this tutorial. For this, nested virtualization must be enabled on your host machine. See the [Ubuntu Server documentation on how to check if nested virtualization is enabled](https://documentation.ubuntu.com/server/how-to/virtualisation/enable-nested-virtualisation). + Only use physical machines in a production environment. 
Use VMs as cluster members only in testing or development environments, such as this tutorial. For this, nested virtualization must be enabled on your host machine. Consult the [Ubuntu Server documentation for details on how to check if nested virtualization is enabled](https://documentation.ubuntu.com/server/how-to/virtualisation/enable-nested-virtualisation/). We also limit each machine in this tutorial to 2 GiB of RAM, which is less than the recommended hardware requirements. In the context of this tutorial, this amount of RAM is sufficient. However, in a production environment, make sure to use machines that fulfill the {ref}`reference-requirements-hardware`. ``` @@ -1098,8 +1098,8 @@ See {ref}`lxd:access-ui` for more information. (tutorial-multi-next)= ## Next steps -Now that your MicroCloud is up and running, you can start using it! If you're already familiar with LXD, see {ref}`howto-commands` for a reference of the most common commands. +Now that your MicroCloud is up and running, you can start using it! If you're already familiar with LXD, see {ref}`ref-commands` for a reference of the most common commands. If you're new to LXD, check out the {ref}`LXD tutorial ` to familiarize yourself with what you can do in LXD. It guides you through common initial operations in LXD, using either the CLI or a web-based graphical UI. Skip the first section about installing and initializing LXD, because LXD is already operational as part of your MicroCloud setup. -Keep in mind that the MicroCloud cluster you created using virtual machines as cluster members in this tutorial is not intended for production use. Follow {ref}`tutorial-multi` if you want to try setting up MicroCloud on a physical machine. +Keep in mind that the MicroCloud cluster you created using virtual machines as cluster members in this tutorial is not intended for production use. Follow our {ref}`next tutorial ` if you want to try setting up MicroCloud on a physical machine.
diff --git a/doc/tutorial/single-member.md b/doc/tutorial/single-member.md index fe196a6c1..46eb3c27f 100644 --- a/doc/tutorial/single-member.md +++ b/doc/tutorial/single-member.md @@ -32,8 +32,8 @@ If you cannot meet the requirements below, try the {ref}`multi-member cluster tu sudo snap remove --purge lxd ``` -- If LXD is installed but not initialized, you do not need to remove it. However, ensure that it is running the most recent LTS version. See the LXD documentation: {ref}`lxd:howto-snap`. -- If MicroCeph or MicroOVN are already installed, ensure that they are also running their most recent LTS versions. To ensure that the versions are compatible for MicroCloud, refer to: {ref}`ref-releases-matrix`. +- If LXD is installed but not initialized, you do not need to remove it. However, ensure that it is running the most recent LTS version. Refer to {ref}`lxd:howto-snap` in the LXD documentation for details. +- If MicroCeph or MicroOVN are already installed, ensure that they are also running their most recent LTS versions. To ensure that the versions are compatible for MicroCloud, refer to {ref}`ref-releases-matrix`. - Due to variation in physical machine setups, it is beyond the scope of this tutorial to instruct you on how to set up your network interfaces and storage disks. Thus, this tutorial requires a higher level of knowledge of server management on your part. If you require step-by-step instructions, follow the {ref}`multi-member cluster tutorial` instead. (tutorial-single-requirements-storage)= @@ -67,7 +67,7 @@ sudo snap install lxd microceph microovn microcloud --cohort="+" ```{admonition} About the cohort flag :class: note The `--cohort="+"` flag in the command ensures that the same version of the snap is installed on all cluster members. -See {ref}`howto-update-sync` for more information. +Refer to {ref}`howto-update-sync` for more information. 
``` (tutorial-single-hold-updates)= @@ -81,14 +81,14 @@ Thus, whenever you install MicroCloud and its components, pause automatic update sudo snap refresh lxd microceph microovn microcloud --hold ``` -For more information, see {ref}`howto-update-hold`. +For more information, refer to {ref}`howto-update-hold`. (tutorial-single-init)= ## Initialize MicroCloud The initialization process sets up LXD, MicroCeph, and MicroOVN to work together as a MicroCloud. When you initialize MicroCloud, you can optionally set up the MicroCloud cluster using a join mechanism to add multiple machines as cluster members. In this tutorial, we will set up a single cluster member during initialization; you can optionally add more cluster members afterward. The initialization also sets up MicroCloud storage and network configurations. -For a detailed look at the initialization process, see: {ref}`explanation-initialization`. +For a detailed look at the initialization process, refer to {ref}`explanation-initialization`. ```{tip} In this tutorial, we initialize MicroCloud interactively. Later, you might want to look into using a preseed file for {ref}`howto-initialize-preseed` to automate deployment with a pre-defined configuration. @@ -193,13 +193,13 @@ By default, MicroCloud uses your internal network for both. Press {kbd}`Enter` t ```{admonition} Using other networks for Ceph :class: note -MicroCloud and MicroCeph support using separate networks for Ceph internal and public traffic if needed. We don't need this for the purposes of this tutorial, but if you'd like to know more, see: {ref}`howto-ceph-networking`. +MicroCloud and MicroCeph support using separate networks for Ceph internal and public traffic if needed. We don't need this for the purposes of this tutorial, but if you'd like to know more, refer to {ref}`howto-ceph-networking`. 
``` (tutorial-single-init-network-uplink)= ### Configure the uplink network -Next, you'll set up the uplink network that provides external connectivity from your cluster members to other networks, such as the internet. This uplink network is configured with MicroOVN, a minimal wrapper around the OVN (Open Virtual Network) project. For more information about OVN networking, see {ref}`exp-networking`. +Next, you'll set up the uplink network that provides external connectivity from your cluster members to other networks, such as the internet. This uplink network is configured with MicroOVN, a minimal wrapper around the OVN (Open Virtual Network) project. For more information about OVN networking, refer to {ref}`exp-networking`. MicroCloud will ask: @@ -255,7 +255,7 @@ This refers to an OVN underlay network. Press {kbd}`Enter` to accept the default ```{admonition} Using a dedicated underlay network :class: note -MicroCloud and MicroOVN support using a dedicated underlay network for OVN traffic. We don't need this for the purposes of this tutorial, but if you'd like to know more, see: {ref}`howto-ovn-underlay`. +MicroCloud and MicroOVN support using a dedicated underlay network for OVN traffic. We don't need this for the purposes of this tutorial, but if you'd like to know more, refer to {ref}`howto-ovn-underlay`. ``` You should then see the following output: @@ -445,7 +445,7 @@ locations: project: default ``` -The OVN network spans across all MicroCloud cluster members and handles internal traffic. It also provides external connectivity to MicroCloud instances by connecting to the uplink network via a virtual router. This virtual router is active on only one cluster member at a time. When there are multiple cluster members, if the cluster member with the virtual router goes offline, the virtual router can migrate to a different cluster member to ensure uplink connectivity. For details, see: {ref}`exp-networking-ovn-architecture`. 
+The OVN network spans across all MicroCloud cluster members and handles internal traffic. It also provides external connectivity to MicroCloud instances by connecting to the uplink network via a virtual router. This virtual router is active on only one cluster member at a time. When there are multiple cluster members, if the cluster member with the virtual router goes offline, the virtual router can migrate to a different cluster member to ensure uplink connectivity. For details, refer to {ref}`exp-networking-ovn-architecture`. Within the output of the previous command (`lxc network show default`), find the value for `volatile.network.ipv4.address`. It should match the first IPv4 address in the subnet range you provided for the uplink network during configuration. This is the IP address for the virtual router. @@ -666,7 +666,7 @@ This ping should fail. Other instances should not be reachable because they are ```{admonition} OVN peer routing :class: tip -If you want to enable direct connectivity for instances on different OVN subnets, see: {ref}`lxd:network-ovn-peers`. +If you want to enable direct connectivity for instances on different OVN subnets, refer to {ref}`lxd:network-ovn-peers`. ``` Exit the `u4` container: @@ -678,7 +678,7 @@ exit (tutorial-single-ui)= ## Access the UI -Instead of managing your instances and your LXD setup from the command line, you can also use the LXD UI. See {ref}`lxd:access-ui` for more information. +Instead of managing your instances and your LXD setup from the command line, you can also use the LXD UI. Refer to {ref}`lxd:access-ui` for more information. Check the LXD cluster list to determine the URL of the cluster member. @@ -719,8 +719,8 @@ You should now see the LXD UI prompting you to set up a certificate. Follow the (tutorial-single-next)= ## Next steps -To learn how to add more physical machines as cluster members to your MicroCloud, see: {ref}`howto-member-add`. 
+To learn how to add more physical machines as cluster members to your MicroCloud, refer to {ref}`howto-member-add`. -See {ref}`howto-commands` for a reference of the most common commands. +Consult {ref}`ref-commands` for a reference of the most common commands. If you're new to LXD, check out the {ref}`LXD tutorials ` to familiarize yourself with what you can do in LXD. You can skip the sections for installing and initializing LXD, because LXD is already operational as part of your MicroCloud setup. diff --git a/service/microceph.go b/service/microceph.go index e922f7fd6..9b827c3ad 100644 --- a/service/microceph.go +++ b/service/microceph.go @@ -103,9 +103,10 @@ func (s CephService) Bootstrap(ctx context.Context) error { } } -// IssueToken issues a token for the given peer. Each token will last 5 minutes in case the system joins the cluster very slowly. +// IssueToken issues a token for the given peer. +// Each token will last 1 hour in case the system joins the cluster very slowly or there are other services which take longer to join (e.g. MicroCeph OSD setup). func (s CephService) IssueToken(ctx context.Context, peer string) (string, error) { - return s.m.NewJoinToken(ctx, peer, 5*time.Minute) + return s.m.NewJoinToken(ctx, peer, ServiceJoinTokenLifetime) } // DeleteToken deletes a token by its name. diff --git a/service/microcloud.go b/service/microcloud.go index 9e5d1cbd2..2c3e9ba95 100644 --- a/service/microcloud.go +++ b/service/microcloud.go @@ -99,9 +99,10 @@ func (s CloudService) Bootstrap(ctx context.Context) error { } } -// IssueToken issues a token for the given peer. Each token will last 5 minutes in case the system joins the cluster very slowly. +// IssueToken issues a token for the given peer. +// Each token will last 1 hour in case the system joins the cluster very slowly or there are other services which take longer to join (e.g. MicroCeph OSD setup). 
func (s CloudService) IssueToken(ctx context.Context, peer string) (string, error) { - return s.client.NewJoinToken(ctx, peer, 5*time.Minute) + return s.client.NewJoinToken(ctx, peer, ServiceJoinTokenLifetime) } // DeleteToken deletes a token by its name. diff --git a/service/microovn.go b/service/microovn.go index 32aece657..a4b8e25fb 100644 --- a/service/microovn.go +++ b/service/microovn.go @@ -89,9 +89,10 @@ func (s OVNService) Bootstrap(ctx context.Context) error { } } -// IssueToken issues a token for the given peer. Each token will last 5 minutes in case the system joins the cluster very slowly. +// IssueToken issues a token for the given peer. +// Each token will last 1 hour in case the system joins the cluster very slowly or there are other services which take longer to join (e.g. MicroCeph OSD setup). func (s OVNService) IssueToken(ctx context.Context, peer string) (string, error) { - return s.m.NewJoinToken(ctx, peer, 5*time.Minute) + return s.m.NewJoinToken(ctx, peer, ServiceJoinTokenLifetime) } // DeleteToken deletes a token by its name. diff --git a/service/service_handler.go b/service/service_handler.go index 82d83aef9..12e4ab99e 100644 --- a/service/service_handler.go +++ b/service/service_handler.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "sync" + "time" "github.com/canonical/lxd/shared/api" @@ -31,6 +32,9 @@ const ( CloudMulticastPort int64 = 9444 ) +// ServiceJoinTokenLifetime is the duration for which a join token issued by a service will be valid. +const ServiceJoinTokenLifetime = time.Hour + // Handler holds a set of stateful services. type Handler struct { Services map[types.ServiceType]Service diff --git a/test/includes/microcloud.sh b/test/includes/microcloud.sh index 795305168..401f8c595 100644 --- a/test/includes/microcloud.sh +++ b/test/includes/microcloud.sh @@ -1313,15 +1313,17 @@ setup_system() { if [ ! 
"${BASE_OS}" = "22.04" ]; then packages+=" yq" else - retry lxc exec "${name}" -- snap install yq + lxc exec "${name}" -- snap install yq fi # shellcheck disable=SC2086 retry lxc exec "${name}" -- apt-get install --no-install-recommends -y ${packages} - retry lxc exec "${name}" -- snap install snapd + lxc exec "${name}" -- sh -ceu ' + snap refresh snapd --channel latest/beta || snap install snapd --channel latest/beta + ' # Install core26 to allow latest MicroOVN to be used. - retry lxc exec "${name}" -- snap install core26 --channel latest/edge + lxc exec "${name}" -- snap install core26 --channel latest/edge # Free disk blocks lxc exec "${name}" -- apt-get clean @@ -1364,7 +1366,7 @@ setup_system() { lxc file push --quiet "${MICROCLOUD_SNAP_PATH}" "${name}"/root/microcloud.snap lxc exec "${name}" -- snap install --dangerous /root/microcloud.snap else - retry lxc exec "${name}" -- snap install microcloud --channel="${MICROCLOUD_SNAP_CHANNEL}" --cohort="+" + lxc exec "${name}" -- snap install microcloud --channel="${MICROCLOUD_SNAP_CHANNEL}" --cohort="+" fi # Hold the snaps to not perform any refreshes during test execution. diff --git a/test/suites/basic.sh b/test/suites/basic.sh index d46b7b107..596515c81 100644 --- a/test/suites/basic.sh +++ b/test/suites/basic.sh @@ -736,7 +736,7 @@ test_service_mismatch() { reset_systems 1 3 1 lxc exec micro01 -- sysctl -wq net.ipv6.conf.enp5s0.disable_ipv6=1 - retry lxc exec micro01 -- snap refresh microceph --channel reef/stable + lxc exec micro01 -- snap refresh microceph --channel reef/stable ! join_session init micro01 || false lxc exec micro01 -- tail -1 out | grep -q "The installed version of MicroCeph is not supported" diff --git a/test/suites/upgrade.sh b/test/suites/upgrade.sh index e5defe115..a275dcd0f 100644 --- a/test/suites/upgrade.sh +++ b/test/suites/upgrade.sh @@ -107,6 +107,9 @@ ovn: done done + # Wait for MicroCeph to stabilize before proceeding. 
+ lxc exec micro01 -- microceph waitready --storage --timeout 300 + for m in micro01 micro02 micro03; do # There was no encryption neither dedicated Ceph networks and CephFS in MicroCloud 1. validate_system_microceph "${m}" 0 0 disk2