Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
103 changes: 94 additions & 9 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,97 @@ jobs:
path: "*.snap"
retention-days: 5

dsl-functional-tests:
name: DSL functional tests
runs-on: ubuntu-22.04
needs: build-microceph
env:
DSL_FUNCTEST_SUITE: run_dsl_full_tests
steps:
- name: Download snap
uses: actions/download-artifact@v4
with:
name: snaps
path: /home/runner

- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Copy utils
run: cp tests/scripts/actionutils.sh $HOME

- name: Clear FORWARD firewall rules
run: ~/actionutils.sh cleaript

- name: Free disk
run: ~/actionutils.sh free_runner_disk

- name: Install dependencies
run: ~/actionutils.sh setup_lxd

- name: Run DSL functional test suite
run: |
chmod +x tests/scripts/test_dsl_functest.sh
./tests/scripts/test_dsl_functest.sh \
--snap-path '/home/runner/*.snap' \
"$DSL_FUNCTEST_SUITE"
- name: Print LXD state for failure
if: failure()
run: |
lxc list || true
lxc storage list || true
lxc storage volume list default || true
api-tests:
name: API tests
runs-on: ubuntu-22.04
needs: build-microceph
steps:
- name: Download snap
uses: actions/download-artifact@v4
with:
name: snaps
path: /home/runner

- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Copy utils
run: cp tests/scripts/actionutils.sh $HOME

- name: Free disk
run: ~/actionutils.sh free_runner_disk

- name: Bootstrap MicroCeph
run: ~/actionutils.sh install_and_bootstrap_microceph

- name: API Testing
run: |
~/actionutils.sh install_hurl
~/actionutils.sh prepare_disk_api_hurl_fixtures
~/actionutils.sh hurl tests/hurl/services-mon.hurl
~/actionutils.sh hurl tests/hurl/maintenance-put-failed.hurl
~/actionutils.sh hurl tests/hurl/disks-list.hurl
- name: API disk Hurl testing with discoverable devices
run: |
~/actionutils.sh cleaript
~/actionutils.sh setup_lxd
chmod +x tests/scripts/test_dsl_functest.sh
./tests/scripts/test_dsl_functest.sh \
--snap-path '/home/runner/*.snap' \
test_dsl_api_disk_hurl
- name: Print logs for failure
if: failure()
run: |
~/actionutils.sh dump_microceph_debug
static-checks:
name: Run static checks
runs-on: ubuntu-24.04
Expand Down Expand Up @@ -284,11 +375,6 @@ jobs:
output=$(sudo microceph log get-level)
if [[ "$output" != "3" ]] ; then echo "incorrect log level: $output"; exit 1; fi
- name: Print logs for failure
if: failure()
run: |
sudo snap logs microceph -n 1000
- name: Test square brackets around IPv6
run: |
sudo snap remove microceph
Expand All @@ -298,11 +384,10 @@ jobs:
cat /var/snap/microceph/current/conf/ceph.conf
fgrep -q "[${MON_IP}]" /var/snap/microceph/current/conf/ceph.conf
- name: API Testing
- name: Print logs for failure
if: failure()
run: |
~/actionutils.sh install_hurl
~/actionutils.sh hurl tests/hurl/services-mon.hurl
~/actionutils.sh hurl tests/hurl/maintenance-put-failed.hurl
~/actionutils.sh dump_microceph_debug
multi-node-tests:
name: Multi node testing
Expand Down
45 changes: 45 additions & 0 deletions docs/reference/commands/disk.rst
Original file line number Diff line number Diff line change
Expand Up @@ -66,12 +66,17 @@ Flags:
--all-available add all available devices as OSDs
--db-device string The device used for the DB
--db-encrypt Encrypt the DB device prior to use
--db-match string DSL expression to match backing devices for DB partitions
--db-size string Requested DB partition size for --db-match
--db-wipe Wipe the DB device prior to use
--dry-run Show matched devices without adding them (requires --osd-match)
--encrypt Encrypt the disk prior to use (only block devices)
--json Provide dry-run output as a JSON-encoded DiskAddResponse
--osd-match string DSL expression to match devices for OSD creation
--wal-device string The device used for WAL
--wal-encrypt Encrypt the WAL device prior to use
--wal-match string DSL expression to match backing devices for WAL partitions
--wal-size string Requested WAL partition size for --wal-match
--wal-wipe Wipe the WAL device prior to use
--wipe Wipe the disk prior to use
Expand Down Expand Up @@ -111,6 +116,43 @@ Example expressions:
# Preview matches without adding
microceph disk add --osd-match "eq(@type, 'sata')" --dry-run
# Select WAL/DB carriers separately and control their wipe/encryption independently
microceph disk add --osd-match "eq(@type, 'ssd')" --encrypt \
--wal-match "eq(@type, 'nvme')" --wal-size 1GiB --wal-encrypt --wal-wipe \
--db-match "eq(@type, 'sata')" --db-size 4GiB --db-encrypt --db-wipe
Dry-run planning output
^^^^^^^^^^^^^^^^^^^^^^^

When ``--dry-run`` is used with ``--osd-match`` alone (no WAL/DB matching),
MicroCeph prints only the OSD devices that would be added.

When ``--dry-run`` is used together with WAL and/or DB matching, MicroCeph
prints a provisioning plan table with the selected OSDs, the WAL/DB carrier
paths, the planned partition numbers and sizes, and two additional columns:
``WAL ACTION`` and ``DB ACTION``.

When ``--dry-run --json`` is used, MicroCeph prints the underlying
``DiskAddResponse`` document directly instead of a human-formatted table. This
machine-readable output is intended for shell automation and behaviour tests.
The JSON payload keeps the same fields used by the API under ``metadata``:
``validation_error``, ``warnings``, ``dry_run_devices``, and ``dry_run_plan``.
Each ``dry_run_plan`` entry contains the selected ``osd_path`` and optional
nested ``wal``/``db`` objects with ``kind``, ``parent_path``, ``partition``,
``size``, and ``reset_before_use``.

The action column values mean:

- ``new``: create the first auxiliary partition on a clean carrier
- ``append``: add another partition on a carrier already used by the current cluster
- ``reset``: wipe/reset the carrier before creating the planned partition(s)

A ``reset`` action is shown when ``--wal-wipe`` or ``--db-wipe`` allows a
matched carrier to be reclaimed before partitioning, for example when the disk
already contains foreign data or a foreign partition table. In these cases,
``--dry-run`` also emits an explicit warning naming each carrier that would be
wiped/reset before partitioning.

Available predicates:

- ``and(a, b, ...)`` - Logical AND (variadic)
Expand All @@ -137,6 +179,9 @@ Numbers and units must be written without any space between them (e.g., ``100GiB
Limitations:

- ``--osd-match`` cannot be used together with ``--wal-device`` or ``--db-device``.
- ``--wal-encrypt`` and ``--wal-wipe`` require ``--wal-match`` when using DSL-based selection.
- ``--db-encrypt`` and ``--db-wipe`` require ``--db-match`` when using DSL-based selection.
- ``--wal-match`` and ``--db-match`` must resolve to disjoint device sets.


``list``
Expand Down
95 changes: 89 additions & 6 deletions microceph/api/disks.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"strconv"
"sync"

"github.com/canonical/lxd/shared/units"
"github.com/canonical/microceph/microceph/interfaces"

"github.com/canonical/microceph/microceph/logger"
Expand Down Expand Up @@ -61,11 +62,16 @@ func cmdDisksPost(s state.State, r *http.Request) response.Response {
return response.InternalError(err)
}

err = validateDiskPostRequest(req)
if err != nil {
return response.SyncResponse(true, types.DiskAddResponse{ValidationError: err.Error()})
}

mu.Lock()
defer mu.Unlock()

// Handle DSL-based device selection
if req.OSDMatch != "" {
// Handle DSL-based device selection.
if usesDSLDiskAddRequest(req) {
return handleDSLDiskAdd(r, s, req)
}

Expand Down Expand Up @@ -101,9 +107,85 @@ func cmdDisksPost(s state.State, r *http.Request) response.Response {
return response.SyncResponse(true, resp)
}

// usesDSLDiskAddRequest reports whether the request relies on DSL-based
// device selection: any of the match expressions (--osd-match/--wal-match/
// --db-match), dry-run planning, or a WAL/DB size is present.
func usesDSLDiskAddRequest(req types.DisksPost) bool {
	return req.OSDMatch != "" || req.WALMatch != "" || req.DBMatch != "" || req.DryRun || req.WALSize != "" || req.DBSize != ""
}

// validatePositiveByteSizeString parses value as a human-readable byte size
// string (e.g. "4GiB") and ensures it is strictly positive. flagName is used
// only to label the returned error.
func validatePositiveByteSizeString(value string, flagName string) error {
	parsed, parseErr := units.ParseByteSizeString(value)
	switch {
	case parseErr != nil:
		return fmt.Errorf("invalid %s: %w", flagName, parseErr)
	case parsed <= 0:
		return fmt.Errorf("%s must be greater than 0", flagName)
	default:
		return nil
	}
}

// validateDiskPostRequest checks the flag combinations carried by a disk-add
// request before any work is attempted. It rejects mixing DSL-based selection
// with positional paths or explicit WAL/DB devices, enforces that dependent
// flags are supplied together, and verifies that any WAL/DB size parses to a
// positive byte count. Returns nil when the request is coherent.
func validateDiskPostRequest(req types.DisksPost) error {
	usesDSL := usesDSLDiskAddRequest(req)

	// Flag-combination rules, evaluated in order so the first violation wins.
	rules := []struct {
		violated bool
		message  string
	}{
		{usesDSL && len(req.Path) > 0, "--osd-match/--wal-match/--db-match cannot be used with positional device arguments"},
		{req.DryRun && req.OSDMatch == "", "--dry-run requires --osd-match"},
		{usesDSL && (req.WALDev != nil || req.DBDev != nil), "--wal-device and --db-device are not supported with DSL matching in this version"},
		{req.WALMatch != "" && req.OSDMatch == "", "--wal-match requires --osd-match"},
		{req.DBMatch != "" && req.OSDMatch == "", "--db-match requires --osd-match"},
		{req.WALMatch != "" && req.WALSize == "", "--wal-match requires --wal-size"},
		{req.DBMatch != "" && req.DBSize == "", "--db-match requires --db-size"},
		{req.WALSize != "" && req.WALMatch == "", "--wal-size requires --wal-match"},
		{req.DBSize != "" && req.DBMatch == "", "--db-size requires --db-match"},
		{req.WALEncrypt && req.WALMatch == "" && req.WALDev == nil, "--wal-encrypt requires --wal-match or --wal-device"},
		{req.WALWipe && req.WALMatch == "" && req.WALDev == nil, "--wal-wipe requires --wal-match or --wal-device"},
		{req.DBEncrypt && req.DBMatch == "" && req.DBDev == nil, "--db-encrypt requires --db-match or --db-device"},
		{req.DBWipe && req.DBMatch == "" && req.DBDev == nil, "--db-wipe requires --db-match or --db-device"},
	}
	for _, rule := range rules {
		if rule.violated {
			return fmt.Errorf("%s", rule.message)
		}
	}

	// Size strings are only parsed when the matching flag is present; the
	// pairing rules above guarantee the size is non-empty at this point.
	if req.WALMatch != "" {
		if err := validatePositiveByteSizeString(req.WALSize, "--wal-size"); err != nil {
			return err
		}
	}
	if req.DBMatch != "" {
		if err := validatePositiveByteSizeString(req.DBSize, "--db-size"); err != nil {
			return err
		}
	}

	return nil
}

// handleDSLDiskAdd handles DSL-based device selection for OSD creation and dry-run planning.
//
// The rendered diff left both the old call (AddDisksWithDSL) and its
// replacement in place, declaring resp twice; only the new request-based
// call is kept so the whole DisksPost payload (WAL/DB matching, sizes,
// dry-run) reaches the ceph layer.
func handleDSLDiskAdd(r *http.Request, s state.State, req types.DisksPost) response.Response {
	resp := ceph.AddDisksWithDSLRequest(r.Context(), s, req)
	return response.SyncResponse(true, resp)
}

Expand Down Expand Up @@ -153,11 +235,12 @@ func cmdDisksDelete(s state.State, r *http.Request) response.Response {
// the flag has no effect in this case. Errors are non-fatal here since
// this is only an advisory warning.
if req.ConfirmDowngrade {
if onRack, err := ceph.IsOnRackRule(); err != nil {
onRack, err := ceph.IsOnRackRule()
if err != nil {
logger.Warnf("Could not determine crush rule type: %v", err)
} else if onRack {
logger.Warnf(
"--confirm-failure-domain-downgrade has no effect: cluster uses rack-level "+
"--confirm-failure-domain-downgrade has no effect: cluster uses rack-level " +
"failure domain (availability zones). Downgrade from rack is not supported.",
)
}
Expand Down
Loading
Loading