From 535333af0bc57ddf23de3483a70ec1ba6dd0ddfd Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Thu, 9 Apr 2026 11:00:55 -0400 Subject: [PATCH 01/12] update apikey var --- docusaurus.config.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docusaurus.config.js b/docusaurus.config.js index 5f3c75c6..3989bf9b 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -212,7 +212,7 @@ const config = { }, algolia: { appId: "4OZX85VEXQ", - apiKey: "9f96702edcf79d7097bedfce2813d49e", + apiKey: process.env.ALGOLIA_API_KEY || "9f96702edcf79d7097bedfce2813d49e", indexName: "upbound", contextualSearch: true, searchPagePath: "search", From 755c647a94d9e7481b3df8d5585362fe174e4e0d Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Thu, 9 Apr 2026 13:55:58 -0400 Subject: [PATCH 02/12] update sidebars --- .../version-1.13-sidebars.json | 116 ++++++++++++++++ .../version-1.14-sidebars.json | 116 ++++++++++++++++ .../version-1.15-sidebars.json | 115 +++++++++++++++ .../version-1.16-sidebars.json | 131 ++++++++++++++++++ 4 files changed, 478 insertions(+) create mode 100644 self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json create mode 100644 self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json create mode 100644 self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json create mode 100644 self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json diff --git a/self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json new file mode 100644 index 00000000..5e6d3cbe --- /dev/null +++ b/self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json @@ -0,0 +1,116 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "doc", + "id": "self-hosted-spaces-quickstart" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/groups", + 
"concepts/deployment-modes" + ] + }, + { + "type": "category", + "label": "Deploy", + "items": [ + "howtos/deployment-reqs", + "howtos/self-hosted-spaces-deployment", + "howtos/certs", + "howtos/proxies-config", + "howtos/attach-detach" + ] + }, + { + "type": "category", + "label": "Configure", + "items": [ + "howtos/configure-ha", + "howtos/administer-features", + "howtos/oidc-configuration", + "howtos/scaling-resources", + "howtos/spaces-management" + ] + }, + { + "type": "category", + "label": "Control Planes", + "items": [ + "howtos/managed-service", + "howtos/control-plane-topologies", + "howtos/auto-upgrade", + "howtos/declarative-ctps", + "howtos/controllers", + "howtos/migrating-to-mcps", + "howtos/simulations" + ] + }, + { + "type": "category", + "label": "Connect", + "items": [ + "howtos/api-connector", + "howtos/ctp-connector", + "howtos/mcp-connector-guide", + "howtos/query-api" + ] + }, + { + "type": "category", + "label": "Observe & Debug", + "items": [ + "howtos/observability", + "howtos/space-observability", + "howtos/debugging-a-ctp", + "howtos/troubleshooting" + ] + }, + { + "type": "category", + "label": "GitOps & Automation", + "items": [ + "howtos/automation-and-gitops/overview", + "howtos/gitops-with-argocd", + "howtos/use-argo" + ] + }, + { + "type": "category", + "label": "Data & Security", + "items": [ + "howtos/backup-and-restore", + "howtos/dr", + "howtos/secrets-management", + { + "type": "category", + "label": "Workload Identity", + "items": [ + "howtos/workload-id/backup-restore-config", + "howtos/workload-id/billing-config", + "howtos/workload-id/eso-config" + ] + } + ] + }, + { + "type": "category", + "label": "Billing & Licensing", + "items": [ + "howtos/billing" + ] + }, + { + "type": "doc", + "id": "reference/index", + "label": "API Reference" + } + ] +} diff --git a/self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json new file mode 100644 index 
00000000..5e6d3cbe --- /dev/null +++ b/self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json @@ -0,0 +1,116 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "doc", + "id": "self-hosted-spaces-quickstart" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/groups", + "concepts/deployment-modes" + ] + }, + { + "type": "category", + "label": "Deploy", + "items": [ + "howtos/deployment-reqs", + "howtos/self-hosted-spaces-deployment", + "howtos/certs", + "howtos/proxies-config", + "howtos/attach-detach" + ] + }, + { + "type": "category", + "label": "Configure", + "items": [ + "howtos/configure-ha", + "howtos/administer-features", + "howtos/oidc-configuration", + "howtos/scaling-resources", + "howtos/spaces-management" + ] + }, + { + "type": "category", + "label": "Control Planes", + "items": [ + "howtos/managed-service", + "howtos/control-plane-topologies", + "howtos/auto-upgrade", + "howtos/declarative-ctps", + "howtos/controllers", + "howtos/migrating-to-mcps", + "howtos/simulations" + ] + }, + { + "type": "category", + "label": "Connect", + "items": [ + "howtos/api-connector", + "howtos/ctp-connector", + "howtos/mcp-connector-guide", + "howtos/query-api" + ] + }, + { + "type": "category", + "label": "Observe & Debug", + "items": [ + "howtos/observability", + "howtos/space-observability", + "howtos/debugging-a-ctp", + "howtos/troubleshooting" + ] + }, + { + "type": "category", + "label": "GitOps & Automation", + "items": [ + "howtos/automation-and-gitops/overview", + "howtos/gitops-with-argocd", + "howtos/use-argo" + ] + }, + { + "type": "category", + "label": "Data & Security", + "items": [ + "howtos/backup-and-restore", + "howtos/dr", + "howtos/secrets-management", + { + "type": "category", + "label": "Workload Identity", + "items": [ + "howtos/workload-id/backup-restore-config", + "howtos/workload-id/billing-config", + 
"howtos/workload-id/eso-config" + ] + } + ] + }, + { + "type": "category", + "label": "Billing & Licensing", + "items": [ + "howtos/billing" + ] + }, + { + "type": "doc", + "id": "reference/index", + "label": "API Reference" + } + ] +} diff --git a/self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json new file mode 100644 index 00000000..c932d0ac --- /dev/null +++ b/self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json @@ -0,0 +1,115 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "doc", + "id": "self-hosted-spaces-quickstart" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "Deploy", + "items": [ + "howtos/deployment-reqs", + "howtos/self-hosted-spaces-deployment", + "howtos/certs", + "howtos/attach-detach" + ] + }, + { + "type": "category", + "label": "Configure", + "items": [ + "howtos/configure-ha", + "howtos/oidc-configuration", + "howtos/scaling-resources", + "howtos/spaces-management" + ] + }, + { + "type": "category", + "label": "Control Planes", + "items": [ + "howtos/managed-service", + "howtos/control-plane-topologies", + "howtos/auto-upgrade", + "howtos/declarative-ctps", + "howtos/controllers", + "howtos/migrating-to-mcps", + "howtos/simulations" + ] + }, + { + "type": "category", + "label": "Connect", + "items": [ + "howtos/api-connector", + "howtos/ctp-connector", + "howtos/mcp-connector-guide", + "howtos/query-api" + ] + }, + { + "type": "category", + "label": "Observe & Debug", + "items": [ + "howtos/observability", + "howtos/space-observability", + "howtos/ctp-audit-logs", + "howtos/debugging-a-ctp", + "howtos/troubleshooting" + ] + }, + { + "type": "category", + "label": "GitOps & Automation", + "items": [ + "howtos/automation-and-gitops/overview", + "howtos/gitops-with-argocd", + 
"howtos/use-argo" + ] + }, + { + "type": "category", + "label": "Data & Security", + "items": [ + "howtos/backup-and-restore", + "howtos/dr", + "howtos/secrets-management", + { + "type": "category", + "label": "Workload Identity", + "items": [ + "howtos/workload-id/backup-restore-config", + "howtos/workload-id/billing-config", + "howtos/workload-id/eso-config" + ] + } + ] + }, + { + "type": "category", + "label": "Billing & Licensing", + "items": [ + "howtos/billing", + "howtos/capacity-licensing" + ] + }, + { + "type": "doc", + "id": "reference/index", + "label": "API Reference" + } + ] +} diff --git a/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json new file mode 100644 index 00000000..b3429579 --- /dev/null +++ b/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json @@ -0,0 +1,131 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "doc", + "id": "self-hosted-spaces-quickstart" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/groups", + "concepts/deployment-modes" + ] + }, + { + "type": "category", + "label": "Deploy", + "items": [ + "howtos/deployment-reqs", + "howtos/self-hosted-spaces-deployment", + "howtos/certs", + "howtos/proxies-config", + "howtos/ingress", + "howtos/ingress-nginx-migration", + "howtos/mirror-images", + "howtos/attach-detach" + ] + }, + { + "type": "category", + "label": "Configure", + "items": [ + "howtos/configure-ha", + "howtos/administer-features", + "howtos/oidc-configuration", + "howtos/scaling-resources", + "howtos/spaces-management" + ] + }, + { + "type": "category", + "label": "Control Planes", + "items": [ + "howtos/managed-service", + "howtos/control-plane-topologies", + "howtos/auto-upgrade", + "howtos/declarative-ctps", + "howtos/controllers", + "howtos/migrating-to-mcps", + "howtos/simulations" + ] + }, + { + "type": 
"category", + "label": "Connect", + "items": [ + "howtos/api-connector", + "howtos/ctp-connector", + "howtos/mcp-connector-guide", + "howtos/query-api", + "howtos/deploy-query-api" + ] + }, + { + "type": "category", + "label": "Observe & Debug", + "items": [ + "howtos/observability", + "howtos/space-observability", + { + "type": "category", + "label": "Tracing", + "items": [ + "howtos/tracing/overview", + "howtos/tracing/query-api", + "howtos/tracing/spaces-api", + "howtos/tracing/spaces-router" + ] + }, + "howtos/ctp-audit-logs", + "howtos/debugging-a-ctp", + "howtos/troubleshooting" + ] + }, + { + "type": "category", + "label": "GitOps & Automation", + "items": [ + "howtos/gitops", + "howtos/use-argo" + ] + }, + { + "type": "category", + "label": "Data & Security", + "items": [ + "howtos/backup-and-restore", + "howtos/dr", + "howtos/secrets-management", + { + "type": "category", + "label": "Workload Identity", + "items": [ + "howtos/workload-id/backup-restore-config", + "howtos/workload-id/billing-config", + "howtos/workload-id/eso-config" + ] + } + ] + }, + { + "type": "category", + "label": "Billing & Licensing", + "items": [ + "howtos/billing", + "howtos/capacity-licensing" + ] + }, + { + "type": "doc", + "id": "reference/index", + "label": "API Reference" + } + ] +} From c9659999213ed11bfa288814a2394038d496a390 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Thu, 9 Apr 2026 13:57:31 -0400 Subject: [PATCH 03/12] Revert "update sidebars" This reverts commit 755c647a94d9e7481b3df8d5585362fe174e4e0d. 
--- .../version-1.13-sidebars.json | 116 ---------------- .../version-1.14-sidebars.json | 116 ---------------- .../version-1.15-sidebars.json | 115 --------------- .../version-1.16-sidebars.json | 131 ------------------ 4 files changed, 478 deletions(-) delete mode 100644 self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json delete mode 100644 self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json delete mode 100644 self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json delete mode 100644 self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json diff --git a/self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json deleted file mode 100644 index 5e6d3cbe..00000000 --- a/self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json +++ /dev/null @@ -1,116 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "doc", - "id": "self-hosted-spaces-quickstart" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/groups", - "concepts/deployment-modes" - ] - }, - { - "type": "category", - "label": "Deploy", - "items": [ - "howtos/deployment-reqs", - "howtos/self-hosted-spaces-deployment", - "howtos/certs", - "howtos/proxies-config", - "howtos/attach-detach" - ] - }, - { - "type": "category", - "label": "Configure", - "items": [ - "howtos/configure-ha", - "howtos/administer-features", - "howtos/oidc-configuration", - "howtos/scaling-resources", - "howtos/spaces-management" - ] - }, - { - "type": "category", - "label": "Control Planes", - "items": [ - "howtos/managed-service", - "howtos/control-plane-topologies", - "howtos/auto-upgrade", - "howtos/declarative-ctps", - "howtos/controllers", - "howtos/migrating-to-mcps", - "howtos/simulations" - ] - }, - { - "type": "category", - "label": "Connect", - "items": [ - "howtos/api-connector", - 
"howtos/ctp-connector", - "howtos/mcp-connector-guide", - "howtos/query-api" - ] - }, - { - "type": "category", - "label": "Observe & Debug", - "items": [ - "howtos/observability", - "howtos/space-observability", - "howtos/debugging-a-ctp", - "howtos/troubleshooting" - ] - }, - { - "type": "category", - "label": "GitOps & Automation", - "items": [ - "howtos/automation-and-gitops/overview", - "howtos/gitops-with-argocd", - "howtos/use-argo" - ] - }, - { - "type": "category", - "label": "Data & Security", - "items": [ - "howtos/backup-and-restore", - "howtos/dr", - "howtos/secrets-management", - { - "type": "category", - "label": "Workload Identity", - "items": [ - "howtos/workload-id/backup-restore-config", - "howtos/workload-id/billing-config", - "howtos/workload-id/eso-config" - ] - } - ] - }, - { - "type": "category", - "label": "Billing & Licensing", - "items": [ - "howtos/billing" - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json deleted file mode 100644 index 5e6d3cbe..00000000 --- a/self-hosted-spaces_versioned_sidebars/version-1.14-sidebars.json +++ /dev/null @@ -1,116 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "doc", - "id": "self-hosted-spaces-quickstart" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/groups", - "concepts/deployment-modes" - ] - }, - { - "type": "category", - "label": "Deploy", - "items": [ - "howtos/deployment-reqs", - "howtos/self-hosted-spaces-deployment", - "howtos/certs", - "howtos/proxies-config", - "howtos/attach-detach" - ] - }, - { - "type": "category", - "label": "Configure", - "items": [ - "howtos/configure-ha", - "howtos/administer-features", - "howtos/oidc-configuration", - "howtos/scaling-resources", - 
"howtos/spaces-management" - ] - }, - { - "type": "category", - "label": "Control Planes", - "items": [ - "howtos/managed-service", - "howtos/control-plane-topologies", - "howtos/auto-upgrade", - "howtos/declarative-ctps", - "howtos/controllers", - "howtos/migrating-to-mcps", - "howtos/simulations" - ] - }, - { - "type": "category", - "label": "Connect", - "items": [ - "howtos/api-connector", - "howtos/ctp-connector", - "howtos/mcp-connector-guide", - "howtos/query-api" - ] - }, - { - "type": "category", - "label": "Observe & Debug", - "items": [ - "howtos/observability", - "howtos/space-observability", - "howtos/debugging-a-ctp", - "howtos/troubleshooting" - ] - }, - { - "type": "category", - "label": "GitOps & Automation", - "items": [ - "howtos/automation-and-gitops/overview", - "howtos/gitops-with-argocd", - "howtos/use-argo" - ] - }, - { - "type": "category", - "label": "Data & Security", - "items": [ - "howtos/backup-and-restore", - "howtos/dr", - "howtos/secrets-management", - { - "type": "category", - "label": "Workload Identity", - "items": [ - "howtos/workload-id/backup-restore-config", - "howtos/workload-id/billing-config", - "howtos/workload-id/eso-config" - ] - } - ] - }, - { - "type": "category", - "label": "Billing & Licensing", - "items": [ - "howtos/billing" - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json deleted file mode 100644 index c932d0ac..00000000 --- a/self-hosted-spaces_versioned_sidebars/version-1.15-sidebars.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "doc", - "id": "self-hosted-spaces-quickstart" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/groups" - ] - }, - { - "type": "category", - 
"label": "Deploy", - "items": [ - "howtos/deployment-reqs", - "howtos/self-hosted-spaces-deployment", - "howtos/certs", - "howtos/attach-detach" - ] - }, - { - "type": "category", - "label": "Configure", - "items": [ - "howtos/configure-ha", - "howtos/oidc-configuration", - "howtos/scaling-resources", - "howtos/spaces-management" - ] - }, - { - "type": "category", - "label": "Control Planes", - "items": [ - "howtos/managed-service", - "howtos/control-plane-topologies", - "howtos/auto-upgrade", - "howtos/declarative-ctps", - "howtos/controllers", - "howtos/migrating-to-mcps", - "howtos/simulations" - ] - }, - { - "type": "category", - "label": "Connect", - "items": [ - "howtos/api-connector", - "howtos/ctp-connector", - "howtos/mcp-connector-guide", - "howtos/query-api" - ] - }, - { - "type": "category", - "label": "Observe & Debug", - "items": [ - "howtos/observability", - "howtos/space-observability", - "howtos/ctp-audit-logs", - "howtos/debugging-a-ctp", - "howtos/troubleshooting" - ] - }, - { - "type": "category", - "label": "GitOps & Automation", - "items": [ - "howtos/automation-and-gitops/overview", - "howtos/gitops-with-argocd", - "howtos/use-argo" - ] - }, - { - "type": "category", - "label": "Data & Security", - "items": [ - "howtos/backup-and-restore", - "howtos/dr", - "howtos/secrets-management", - { - "type": "category", - "label": "Workload Identity", - "items": [ - "howtos/workload-id/backup-restore-config", - "howtos/workload-id/billing-config", - "howtos/workload-id/eso-config" - ] - } - ] - }, - { - "type": "category", - "label": "Billing & Licensing", - "items": [ - "howtos/billing", - "howtos/capacity-licensing" - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json deleted file mode 100644 index b3429579..00000000 --- 
a/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json +++ /dev/null @@ -1,131 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "doc", - "id": "self-hosted-spaces-quickstart" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/groups", - "concepts/deployment-modes" - ] - }, - { - "type": "category", - "label": "Deploy", - "items": [ - "howtos/deployment-reqs", - "howtos/self-hosted-spaces-deployment", - "howtos/certs", - "howtos/proxies-config", - "howtos/ingress", - "howtos/ingress-nginx-migration", - "howtos/mirror-images", - "howtos/attach-detach" - ] - }, - { - "type": "category", - "label": "Configure", - "items": [ - "howtos/configure-ha", - "howtos/administer-features", - "howtos/oidc-configuration", - "howtos/scaling-resources", - "howtos/spaces-management" - ] - }, - { - "type": "category", - "label": "Control Planes", - "items": [ - "howtos/managed-service", - "howtos/control-plane-topologies", - "howtos/auto-upgrade", - "howtos/declarative-ctps", - "howtos/controllers", - "howtos/migrating-to-mcps", - "howtos/simulations" - ] - }, - { - "type": "category", - "label": "Connect", - "items": [ - "howtos/api-connector", - "howtos/ctp-connector", - "howtos/mcp-connector-guide", - "howtos/query-api", - "howtos/deploy-query-api" - ] - }, - { - "type": "category", - "label": "Observe & Debug", - "items": [ - "howtos/observability", - "howtos/space-observability", - { - "type": "category", - "label": "Tracing", - "items": [ - "howtos/tracing/overview", - "howtos/tracing/query-api", - "howtos/tracing/spaces-api", - "howtos/tracing/spaces-router" - ] - }, - "howtos/ctp-audit-logs", - "howtos/debugging-a-ctp", - "howtos/troubleshooting" - ] - }, - { - "type": "category", - "label": "GitOps & Automation", - "items": [ - "howtos/gitops", - "howtos/use-argo" - ] - }, - { - "type": "category", - "label": "Data & Security", - "items": [ - 
"howtos/backup-and-restore", - "howtos/dr", - "howtos/secrets-management", - { - "type": "category", - "label": "Workload Identity", - "items": [ - "howtos/workload-id/backup-restore-config", - "howtos/workload-id/billing-config", - "howtos/workload-id/eso-config" - ] - } - ] - }, - { - "type": "category", - "label": "Billing & Licensing", - "items": [ - "howtos/billing", - "howtos/capacity-licensing" - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} From eb3e05f2cffea5d50019be3dd3d2da2fd797e49f Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Tue, 7 Apr 2026 10:46:34 -0400 Subject: [PATCH 04/12] Adds tutorial versions of Instruqt scenarios --- docs/getstarted/crossplane-tutorial.md | 557 +++++++++++++++++++++++++ 1 file changed, 557 insertions(+) create mode 100644 docs/getstarted/crossplane-tutorial.md diff --git a/docs/getstarted/crossplane-tutorial.md b/docs/getstarted/crossplane-tutorial.md new file mode 100644 index 00000000..9121fe26 --- /dev/null +++ b/docs/getstarted/crossplane-tutorial.md @@ -0,0 +1,557 @@ +--- +title: Build a platform with Crossplane and Upbound +description: Deploy a real app with a cloud database, observe drift detection, enforce policies, and change infrastructure live — all from a single control plane. +weight: {weight} +--- +import Version from "@site/src/components/Version.js" +import { versions } from "@site/src/components/Version.js" +import CodeBlock from '@theme/CodeBlock'; + + +In this tutorial, you'll deploy an application with a PostgreSQL database on +AWS, watch Crossplane self-heal a manually changed resource, enforce security +policy, and +change live infrastructure — all by updating YAML files. 
+ +By the end of this tutorial, you'll be able to: + +- Deploy a composite resource that creates multiple AWS resources from a single manifest +- Trigger drift detection and watch Crossplane correct an out-of-band change +- Block non-compliant requests using Kyverno before they reach Crossplane +- Update live infrastructure by changing desired state + +## Prerequisites + +Install the following: + +- [`kubectl`][kubectl-install] +- [AWS CLI][aws-cli], configured with credentials for an account you can create + resources in +- [kind][kind] + +### Download the `up` CLI + +The Upbound `up` command-line enables interaction with Upbound control planes. + +Install the `up` command-line via shell, Homebrew or Linux package. + + + +Install the latest version of the `up` command-line via shell script by +downloading the install script from [Upbound][upbound]. + +:::tip +Shell install is the preferred method for installing the `up` command-line. +::: + +The shell install script automatically determines the operating system and +platform architecture and installs the correct binary. + +```shell +curl -sL "https://cli.upbound.io" | sh +``` + +:::note +Install a specific version of `up` by providing the version. +For example, to install version use the following command: + + +{`curl -sL "https://cli.upbound.io" | VERSION=v${versions.cli} sh`} + + +Find the full list of versions in the Up command-line repository. +::: + + + + +Upbound provides a Windows executable. + + +{`curl.exe -sLo up.exe "https://cli.upbound.io/stable/v${versions.cli}/bin/windows_amd64/up.exe"`} + + +Find the full list of Windows versions in the [Up command-line +repository][win-versions]. + + + + + +[Homebrew][homebrew] is a package manager for Linux and Mac OS. + +Install the `up` command-line with a Homebrew `tap` using the command: + +```shell +brew install upbound/tap/up +``` + + + +Upbound provides both `.deb` and `.rpm` packages for Linux platforms. 
+ +Downloading packages requires both the [version][version] and CPU architecture +(`linux_amd64`, `linux_arm`, `linux_arm64`). + +#### Debian package install + + +{`curl -sLo up.deb "https://cli.upbound.io/stable/v${versions.cli}/deb/up_${versions.cli}_linux_\${ARCH}.deb"`} + + + +#### RPM package install + + +{`curl -sLo up.rpm "https://cli.upbound.io/stable/v${versions.cli}/rpm/up_${versions.cli}_linux_\${ARCH}.rpm"`} + + + + + +The `up` CLI allows you to interact with your control plane. + +### Clone the demo repository + +```bash +git clone https://github.com/tr0njavolta/platform-demo +cd platform-demo +``` + +All file paths in this tutorial are relative to the root of that repository. + +### Set up a control plane + +You need a Kubernetes cluster with Upbound Crossplane (UXP) installed and connected +to the Upbound Console. + +1. Create a cluster. This tutorial uses `kind` to create a local cluster: + + ```bash + kind create cluster --name upbound-demo + ``` + + Create a namespace for the demo: + + ```shell + kubectl create namespace demo + ``` + +2. Install UXP: + + ```bash + up uxp install + ``` + +3. Connect the cluster to the Upbound Console by following the + [connect a control plane][connect-ctp] guide. + +### Install the AWS providers + +The demo uses three AWS providers: EC2, RDS, and IAM. The required packages and +versions are declared in `upbound.yaml`. Install them directly: + +1. 
Apply the provider packages: + + ```bash + kubectl apply -f - <<'EOF' + apiVersion: pkg.crossplane.io/v1 + kind: Provider + metadata: + name: provider-family-aws + spec: + package: xpkg.upbound.io/upbound/provider-family-aws:v2.4.0 + --- + apiVersion: pkg.crossplane.io/v1 + kind: Provider + metadata: + name: provider-aws-iam + spec: + package: xpkg.upbound.io/upbound/provider-aws-iam:v2.4.0 + --- + apiVersion: pkg.crossplane.io/v1 + kind: Provider + metadata: + name: provider-aws-rds + spec: + package: xpkg.upbound.io/upbound/provider-aws-rds:v2.4.0 + --- + apiVersion: pkg.crossplane.io/v1 + kind: Provider + metadata: + name: provider-aws-ec2 + spec: + package: xpkg.upbound.io/upbound/provider-aws-ec2:v2.4.0 + --- + apiVersion: pkg.crossplane.io/v1beta1 + kind: Function + metadata: + name: function-auto-ready + spec: + package: xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.6.1 + EOF + ``` + +2. Wait for all providers to become healthy: + + ```bash + kubectl get providers + ``` + + All providers should show `HEALTHY: True` before continuing. + +### Configure AWS credentials + +1. Create a Kubernetes secret with your AWS credentials: + + ```bash + kubectl create secret generic aws-secret \ + -n demo \ + --from-literal=creds="$(printf '[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n' \ + "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY")" + ``` + +2. Apply the `ProviderConfig` that references those credentials: + + ```bash + kubectl apply -f setup/config/ + ``` + +3. Verify the `ProviderConfig` is present: + + ```bash + kubectl get providerconfigs.aws.m.upbound.io default -n demo + ``` + +### Install the platform APIs + +Apply the XRDs and Compositions from the demo repository: + +```bash +kubectl apply -f apis/ +``` + +Verify the APIs are established: + +```bash +kubectl get xrd +``` + +All XRDs should show `ESTABLISHED: True`. + +:::info +AWS resource provisioning — especially RDS — takes 5–8 minutes. 
Each part of this +tutorial is structured so you can keep reading while AWS works. +::: + +## Deploy an app with a database + + +Open `examples/appwdb/example.yaml`: + +```yaml +apiVersion: demo.upbound.io/v1alpha1 +kind: AppWDB +metadata: + name: demo-01 + namespace: demo +spec: + parameters: + replicas: 2 + dbSize: db.t3.micro + region: eu-central-1 +``` + +This is the entire end-user interface. A developer fills in three fields: replica +count, database size, and AWS region. They don't see the VPC, subnets, IAM role, +or RDS configuration behind it. + +### Deploy it + +1. Apply the manifest: + + ```bash + kubectl apply -f examples/appwdb/example.yaml + ``` + +2. In the Upbound Console, click **View all Composite Resources**. You should see + `demo-01` listed with Crossplane actively reconciling it. + +That 10-line file expands into: + +- VPC + 3 subnets (eu-central-1a, b, c) +- RDS subnet group + PostgreSQL instance (gp3 storage) +- IAM role +- Kubernetes `Deployment` scaled to `replicas: 2` + +3. Open the AWS Console and set your region to **eu-central-1**. Check: + + - [IAM Roles](https://us-east-1.console.aws.amazon.com/iam/home#/roles) — look for `demo-01-role` + - [VPCs](https://eu-central-1.console.aws.amazon.com/vpcconsole/home#vpcs:) — look for `demo-01-vpc` + - [RDS Databases](https://eu-central-1.console.aws.amazon.com/rds/home#databases:) — watch for `demo-01-db` (takes 5–8 minutes) + + +4. Open `apis/appwdb/definition.yaml`. + + This is the XRD — it defines what end users can request. Notice `dbSize` is an enum, + not a free-text field. Users can't request a size the platform doesn't support. + +5. Open `apis/appwdb/composition.yaml`. + + This is the Composition — the mapping from those 10 lines to the full set of AWS + resources. It calls a function written in KCL. You can also write Composition + functions in [Go][fn-go], [Python][fn-python], or [Go Templating][fn-go-template], + and mix languages within a single pipeline. + +6. 
Open `functions/compose-resources/main.k`. + + This is the logic layer. It reads `dbSize` and `replicas` from the composite + resource and outputs every managed resource Crossplane will create. + + +7. Check the composite resource status: + + ```bash + kubectl get appwdb demo-01 -n demo + ``` + +8. Verify the `Deployment` came up (faster than RDS, since it's just a container): + + ```bash + kubectl get pods -n demo + ``` + +9. Describe a pod to confirm it's running. Replace `` with the name from + the previous output: + + ```bash + kubectl describe pod -n demo + ``` + +10. In the Upbound Console, click into `demo-01` and open the **relationship view** + to see the full resource tree and sync status for each composed resource. + + :::info + `demo-01-db` takes a few minutes to reach `SYNCED: True`. Continue to the + next section + while AWS finishes provisioning. + ::: + +## Providers and ProviderConfigs + +**Providers** are Kubernetes controllers that know how to create, update, and delete +resources in a specific cloud service — EC2, RDS, IAM, and so on. In Crossplane 2.0, +the Kubernetes `Deployment` is managed natively without a separate provider. + +**ProviderConfigs** tell those providers how to authenticate. The tutorial uses static +credentials, but production deployments can use OIDC, IRSA, Workload Identity, and +other methods depending on the provider. See [provider authentication][auth-docs]. + +### Verify the providers + +```bash +kubectl get providers +kubectl get providerconfigs.aws.m.upbound.io default -n demo +``` + +All providers should be `HEALTHY: True`. The `default` ProviderConfig is what connects +them to the AWS account where `demo-01-db` is provisioning. + +In the Upbound Console, navigate to `demo-01` and open the **relationship view**. You'll +see all composed resources — VPC, subnets, RDS instance, IAM role, and `Deployment` — +with their sync status and how they connect. + +## Drift detection + +Crossplane never stops watching. 
If someone changes a resource directly in AWS, Crossplane +detects the difference between desired state and actual state and corrects it. This is +**drift detection**. + +### Confirm the VPC is ready + +1. Verify the VPC is running: + + ```bash + kubectl get vpcs.ec2.aws.m.upbound.io demo-01-vpc -n demo + ``` + + Wait until `SYNCED: True`. + +2. In the AWS Console, navigate to **VPC → Your VPCs** and find `demo-01-vpc`. + +3. Click the **Name** tag and change it to something else — for example, + `demo-01-vpc-hacked`. Refresh to confirm the change took effect. + +4. Tell Crossplane to reconcile immediately instead of waiting for the next loop: + + ```bash + kubectl annotate vpcs.ec2.aws.m.upbound.io demo-01-vpc -n demo \ + reconcile.crossplane.io/trigger="$(date)" \ + --overwrite + ``` + +5. Watch the sync status: + + ```bash + kubectl get vpcs.ec2.aws.m.upbound.io demo-01-vpc -n demo -w \ + -o custom-columns='NAME:.metadata.name,SYNCED:.status.conditions[?(@.type=="Synced")].reason' + ``` + +6. Switch to the AWS Console and watch the Name tag snap back to `demo-01-vpc`. + +7. Verify the reconciliation: + + ```bash + kubectl get appwdb demo-01 -n demo + ``` + + `SYNCED: True` confirms the control plane corrected the drift. + +## Add policy enforcement + +**Kyverno** is a policy engine that intercepts Kubernetes admission requests before +they're accepted. A policy violation is blocked before Crossplane runs — nothing +reaches AWS. + + +1. Apply the Kyverno add-on from the Upbound Marketplace: + + ```bash + kubectl apply -f w-kyverno/addon-kyverno.yaml + ``` + +2. In the Upbound Console, select **AddOns** in the left navigation. Wait for + `upbound-addon-kyverno` to become healthy (~2 minutes). + + +3. Apply the policy: + + ```bash + kubectl apply -f w-kyverno/policy-no-privileged.yaml + kubectl get clusterpolicy + ``` + + `READY: True` means the policy is active. 
`disallow-privileged-containers` rejects + any `AppWDBSecure` request where `securityContext.privileged` is `true` — at + admission time, before Crossplane sees it. + + +4. Next, trigger a policy violation. Open `examples/appwdbsecure/example-1.yaml`. It has `securityContext.privileged: true`. + +5. Try to apply it: + + ```bash + kubectl apply -f examples/appwdbsecure/example-1.yaml + ``` + + The request is blocked immediately. The error message tells you exactly which policy + caught it. Nothing was created. + + :::info + `demo-01` — deployed before Kyverno was installed — has a running RDS instance + right now. This request didn't start at all. + ::: + +6. Apply the compliant request: + + ```bash + kubectl apply -f examples/appwdbsecure/example-2.yaml + kubectl get appwdbsecure -n demo -w + ``` + + `privileged: false` passes the policy check and starts provisioning. This takes + ~10 minutes. + +7. Verify the policy enforcement: + + ```bash + kubectl get clusterpolicy disallow-privileged-containers + ``` + + `READY: True` confirms the policy is enforcing. + +## Change it live + +To change infrastructure, update the desired state. Crossplane figures out what needs +to change and does it. + + +Scale the database + +1. Apply the change: + + ```bash + kubectl apply -f examples/appwdb/variant-bigger-db.yaml + ``` + +2. Watch the status. `DESIRED` updates immediately; `ACTUAL` updates once AWS finishes + (~5 minutes): + + ```bash + kubectl get instance.rds.aws.m.upbound.io demo-01-db -n demo -w \ + -o custom-columns='NAME:.metadata.name,DESIRED:.spec.forProvider.instanceClass,ACTUAL:.status.atProvider.instanceClass,SYNCED:.status.conditions[?(@.type=="Synced")].reason' + ``` + +3. In the AWS Console, check the **Status** and **Size** columns for `demo-01-db`. + +4. In the Upbound Console, navigate to `demo-01` and open the **relationship view** + to see the updated resource tree. + +5. 
Confirm the change took effect:

   ```bash
   kubectl get appwdb demo-01 -n demo
   ```

   `SYNCED: True` with your updated `dbSize` or `replicas` means you're done.

## Clean up

Delete the composite resources. Crossplane deletes all composed AWS resources
(RDS instance, VPC, subnets, IAM role) before the composite resource is removed.

```shell
kubectl delete appwdbsecure kyverno-demo-01 -n demo
kubectl delete appwdb demo-01 -n demo
```

RDS deletion takes 5–10 minutes. Wait until both resources are fully removed before
deleting the cluster:

```shell
kubectl get appwdb,appwdbsecure -n demo -w
```

Once both are gone, delete the kind cluster:

```shell
kind delete cluster --name upbound-demo
```

## Next steps

In this tutorial, you:

- Deployed a composite resource that created a VPC, subnets, IAM role, RDS instance,
  and Kubernetes `Deployment` from a 10-line manifest
- Watched Crossplane detect and correct an out-of-band change to a VPC tag
- Blocked a privileged container request with Kyverno before it reached the cluster
- Updated live infrastructure by changing a field in desired state

Continue with:

- [Composite Resource Definitions][xrd-concept] — design your own platform APIs
- [Composition functions][fn-docs] — write the logic that maps user requests to resources
- [Provider authentication][auth-docs] — connect providers to your own cloud account
- [Upbound Marketplace][marketplace] — providers and add-ons for AWS, Azure, GCP, and more

[kubectl-install]: https://kubernetes.io/docs/tasks/tools/
[up-cli]: /manuals/cli/overview
[aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
[connect-ctp]: {link-to-connect-control-plane-guide}
[fn-go]: /manuals/cli/howtos/compositions/go/
[fn-python]: /manuals/cli/howtos/compositions/python/
[fn-go-template]: /manuals/cli/howtos/compositions/go-template/
[xrd-concept]: /manuals/packages/xrds/
[fn-docs]: /manuals/cli/howtos/compositions/ 
+[auth-docs]: /manuals/packages/providers/authentication/ +[marketplace]: https://marketplace.upbound.io/ From b5cffabd9b13a1d39ab5b32edc64d1d8909e2f80 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Thu, 23 Apr 2026 15:48:26 -0400 Subject: [PATCH 05/12] adds ai tutorial --- docs/getstarted/ai-controller-tutorial.md | 626 ++++++++++++ docs/getstarted/crossplane-tutorial.md | 1074 +++++++++++++++------ 2 files changed, 1403 insertions(+), 297 deletions(-) create mode 100644 docs/getstarted/ai-controller-tutorial.md diff --git a/docs/getstarted/ai-controller-tutorial.md b/docs/getstarted/ai-controller-tutorial.md new file mode 100644 index 00000000..2c00527e --- /dev/null +++ b/docs/getstarted/ai-controller-tutorial.md @@ -0,0 +1,626 @@ +--- +title: Build an AI controller with Crossplane +description: Deploy a WatchOperation that uses a local LLM to enforce platform policy — no Go, no operator framework, just YAML and a plain-English rule. +weight: {weight} +validation: + type: walkthrough + owner: docs@upbound.io + environment: local-upbound + timeout: 45m + variables: + HOST_IP: "" +--- + +In this tutorial, you run a Kubernetes controller whose reconciliation logic is +written in plain English. A Crossplane `WatchOperation` watches an nginx +`Deployment` and calls a local LLM whenever it changes. The LLM reads the +current state, applies the rule in its `systemPrompt`, and returns a corrected +manifest. Crossplane applies it. + +By the end of this tutorial, you can: + +- Run a Crossplane `WatchOperation` that calls a local LLM +- Watch the controller detect and correct a policy violation automatically +- Update the enforcement rule by editing a single field in YAML + +The model running in this tutorial is `qwen2.5:1.5b` via Ollama — running +entirely on your local machine. No cloud API key is required. 
+ +## Prerequisites + +Install the following before starting: + +- [Docker][docker-install], running locally +- [`kubectl`][kubectl-install] +- [`kind`][kind-install] + +### Install the up CLI + +This tutorial requires up CLI v0.44.3. + +```shell +curl -sL "https://cli.upbound.io" | VERSION=v0.44.3 sh +``` + +Move the binary into your `PATH`: + +```shell +sudo mv up /usr/local/bin/ +``` + +If you don't have `sudo` access, install to a user-local directory instead: + +```shell +mkdir -p ~/.local/bin && mv up ~/.local/bin/ +``` + +Then add it to your shell profile (`~/.bashrc`, `~/.zshrc`, or equivalent): + +```shell +export PATH="$HOME/.local/bin:$PATH" +``` + +Verify the installation: + +```shell +up version +``` + +## Create the project + +### Create the project directory + +```bash +mkdir english-controller +cd english-controller +``` + +All commands from this point run from inside the `english-controller` directory. + +### Create the project manifest + +The `upbound.yaml` file declares the project and its function dependencies. +`up project run --local` reads this file to know which packages to install into +the cluster. Create it with: + +```bash +cat > upbound.yaml <<'EOF' +apiVersion: meta.dev.upbound.io/v2alpha1 +kind: Project +metadata: + name: english-controller +spec: + dependsOn: + - apiVersion: pkg.crossplane.io/v1 + kind: Function + # function-auto-ready marks composed resources as ready automatically; + # required by Crossplane's composition machinery even when not used directly. + package: xpkg.upbound.io/crossplane-contrib/function-auto-ready + version: '>=v0.0.0' + - apiVersion: pkg.crossplane.io/v1 + kind: Function + # function-openai is the function the WatchOperation calls to reach the LLM. + package: xpkg.upbound.io/upbound/function-openai + version: v0.3.0 + description: A Kubernetes controller whose enforcement logic is written in plain English. 
+EOF +``` + +### Create the `WatchOperation` + +The `WatchOperation` is the controller — it defines what to watch and what +function to call when the watched resource changes. + +```bash +mkdir -p operations/replicas +cat > operations/replicas/operation.yaml <<'EOF' +apiVersion: ops.crossplane.io/v1alpha1 +kind: WatchOperation +metadata: + name: replicas +spec: + concurrencyPolicy: Forbid + successfulHistoryLimit: 3 + failedHistoryLimit: 1 + operationTemplate: + spec: + mode: Pipeline + pipeline: + - functionRef: + name: upbound-function-openai + input: + apiVersion: openai.fn.upbound.io/v1alpha1 + kind: Prompt + systemPrompt: |- + You are a Kubernetes controller. Output raw YAML only — no markdown, no code fences, no backticks, no explanations. + + Rule: if spec.replicas is less than 3, set it to 3. Otherwise keep it unchanged. + userPrompt: |- + Inspect the nginx Deployment and output the corrected manifest. + Output only the Deployment manifest with the correct spec.replicas value. + Include apiVersion, kind, metadata (name: nginx, namespace: default), and spec. + Start your response with 'apiVersion:' + step: deployment-analysis + credentials: + - name: gpt + source: Secret + secretRef: + namespace: crossplane-system + name: gpt + watch: + apiVersion: apps/v1 + kind: Deployment + namespace: default +EOF +``` + +:::info +With a larger model like `gpt-4o` or `gpt-oss:20b`, the `systemPrompt` can be +much simpler — just the rule itself, without the output format instructions. +The explicit YAML output guidance in `userPrompt` is needed specifically for +`qwen2.5:1.5b`. +::: + +## Set up Ollama + +Ollama runs the LLM locally. Install it and pull the model before starting the +cluster — the model is ~1 GB. + +### Install Ollama + +```shell +curl -fsSL https://ollama.com/install.sh | sh +``` + +If the install script doesn't work for your OS, download directly from +[ollama.com/download][ollama-download]. 
+ +### Start Ollama + +On Linux, the install script registers a systemd service that starts Ollama +automatically. On macOS, Ollama may not start automatically after installation. +If `ollama list` returns "could not connect to ollama server", start it manually +in a separate terminal before continuing: + +``` +ollama serve +``` + +Verify it's ready: + +```shell +ollama list +``` + +### Pull the model + +```shell +ollama pull qwen2.5:1.5b +``` + +Confirm the model downloaded: + +```shell +ollama list +``` + +You should see `qwen2.5:1.5b` in the output. + +## Start the project + +Run `up project run --local` from inside the `english-controller` directory. +This command creates a kind cluster, installs UXP, and deploys all packages and +APIs defined in the project. It exits when the cluster is ready. + +```bash +up project run --local --control-plane-version=2.1.4-up.2 +``` + +The `--control-plane-version` flag pins the UXP version installed into the kind +cluster. This tutorial was tested with `2.1.4-up.2`. If you need a different +version, find available version strings in the [UXP release notes][uxp-releases]. + +This takes several minutes on first run — it pulls provider packages and sets up +the cluster. Subsequent runs are faster. + +:::warning +If `up project run --local` exits non-zero and prints `traces export: context +deadline exceeded`, check whether providers were installed: + +```bash +kubectl get providers +``` + +If providers appear, provisioning succeeded despite the telemetry error. +If the list is empty, provisioning failed. Run +`kind delete cluster --name up-app-w-db` and retry. Verify your network +allows outbound connections to `xpkg.upbound.io` on port 443. +::: + +Once the command completes, set your kubeconfig. `up project run --local` names +the kind cluster `up-app-w-db` by default: + +```bash +kind get kubeconfig --name up-app-w-db > ~/.kube/config +``` + +:::warning +This overwrites your existing `~/.kube/config`. 
To preserve existing contexts, +merge instead: + +```bash +kind get kubeconfig --name up-app-w-db > ~/.kube/config-upbound +KUBECONFIG=~/.kube/config:~/.kube/config-upbound \ + kubectl config view --flatten > ~/.kube/config.merged +mv ~/.kube/config.merged ~/.kube/config +``` +::: + +Verify the connection: + +```bash +kubectl get nodes +``` + +## Wire Ollama into the cluster + +The kind cluster's pods need to reach Ollama running on your host. This step +creates a Kubernetes `Service` and `Endpoints` resource that route cluster +traffic to your host machine. + +1. Get the host IP on the kind bridge network: + + **Linux:** + + ```bash + HOST_IP=$(docker network inspect kind -f '{{range .IPAM.Config}}{{.Gateway}}{{end}}') + echo "Host IP: $HOST_IP" + ``` + + **macOS (Docker Desktop):** + + ```bash + HOST_IP=$(docker run --rm alpine sh -c 'getent hosts host.docker.internal' 2>/dev/null | awk '{print $1}') + echo "Host IP: $HOST_IP" + ``` + +2. Create the `ollama` namespace and register Ollama as a cluster service: + + ```bash + kubectl create namespace ollama --dry-run=client -o yaml | kubectl apply -f - + + kubectl apply -f - < +``` + +The `Events` section shows the exact YAML the model returned. + +## Part 2: Watch it self-heal + +The controller re-evaluates on every change. If something modifies the +`Deployment` — a human, a CI pipeline, a rollout — the rule re-applies. +This is drift detection with reasoning. + +### Trigger a violation + +Scale nginx down to 1 replica: + +```bash +kubectl scale deployment nginx --replicas=1 +``` + +### Watch it recover + +```bash +kubectl get deployment nginx -w +``` + +Within 30–60 seconds, replicas climb back to 3. The `WatchOperation` fired +because the `Deployment` changed. The LLM saw 1 replica, decided it violated +the rule, and patched it. + +Press Ctrl+C when replicas are back at 3. + +### Inspect what fired + +```bash +kubectl get operations +``` + +Each entry is a new record. 
The most recent one captured the scale-down event +and the correction. + +## Part 3: Update the rules + +The enforcement logic is a text field. To change the policy, edit `systemPrompt` +and re-apply. + +### Open the operation + +```bash +cat operations/replicas/operation.yaml +``` + +### Change the minimum replicas to 5 + +Find the `systemPrompt` and update the rule line. Change: + +```text +Rule: if spec.replicas is less than 3, set it to 3. Otherwise keep it unchanged. +``` + +To: + +```text +Rule: if spec.replicas is less than 5, set it to 5. Otherwise keep it unchanged. +``` + +Edit the file directly: + +**macOS:** + +```bash +sed -i '' 's/less than 3, set it to 3/less than 5, set it to 5/' \ + operations/replicas/operation.yaml +``` + +**Linux:** + +```bash +sed -i 's/less than 3, set it to 3/less than 5, set it to 5/' \ + operations/replicas/operation.yaml +``` + +### Apply the updated operation + +```bash +kubectl apply -f operations/replicas/operation.yaml +``` + +### Trigger and observe + +Scale nginx down to 1 to trigger the new rule: + +```bash +kubectl scale deployment nginx --replicas=1 +``` + +Watch the updated rule enforce 5 replicas: + +```bash +kubectl get deployment nginx -w +``` + +This takes 30–45 seconds. Press Ctrl+C when you see 5 ready replicas. + +### Verify + +```bash +kubectl get watchoperations +kubectl get operations +``` + +Same architecture, different policy — changed by editing a text field. + +:::tip +Try adding a condition to the rule: + +``` +If the deployment name contains 'prod', require at least 5 replicas. +Otherwise, require at least 2. +``` + +The model interprets natural language conditions the same way it interprets +simple numeric rules. 
+::: + +## Clean up + +Delete the demo resources: + +```bash +kubectl delete watchoperation replicas +kubectl delete operations --all +kubectl delete deployment nginx +``` + +Delete the cluster: + +```bash +kind delete cluster --name up-app-w-db +``` + +## Next steps + +In this tutorial, you: + +- Created a Crossplane project with `upbound.yaml` and a `WatchOperation` +- Deployed a controller that calls a local LLM on every `Deployment` change +- Watched the controller detect and correct a replica count violation +- Updated the enforcement policy by editing a single field in YAML + +Continue with: + +- [WatchOperations reference][watchops-ref] — triggers, concurrency, history + limits, and output handling +- [Composition functions][fn-docs] — build custom logic for any resource +- [Provider authentication][auth-docs] — connect providers to your own cloud + account +- [Upbound Marketplace][marketplace] — functions and providers for AWS, Azure, + GCP, and more + +[docker-install]: https://docs.docker.com/get-docker/ +[kubectl-install]: https://kubernetes.io/docs/tasks/tools/ +[kind-install]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation +[ollama-download]: https://ollama.com/download +[up-cli-releases]: https://github.com/upbound/up/releases +[uxp-releases]: /reference/release-notes/ +[watchops-ref]: /manuals/crossplane/operations/watch/ +[fn-docs]: /manuals/cli/howtos/compositions/ +[auth-docs]: /manuals/packages/providers/authentication/ +[marketplace]: https://marketplace.upbound.io/ diff --git a/docs/getstarted/crossplane-tutorial.md b/docs/getstarted/crossplane-tutorial.md index 9121fe26..99b70af9 100644 --- a/docs/getstarted/crossplane-tutorial.md +++ b/docs/getstarted/crossplane-tutorial.md @@ -1,208 +1,572 @@ --- -title: Build a platform with Crossplane and Upbound +title: Build a platform with Upbound description: Deploy a real app with a cloud database, observe drift detection, enforce policies, and change infrastructure live — all from a 
single control plane. weight: {weight} +validation: + type: walkthrough + owner: docs@upbound.io + environment: local-upbound + timeout: 30m + variables: + AWS_ACCESS_KEY_ID: "" + AWS_SECRET_ACCESS_KEY: "" --- -import Version from "@site/src/components/Version.js" -import { versions } from "@site/src/components/Version.js" -import CodeBlock from '@theme/CodeBlock'; +In this tutorial, you deploy an application with a PostgreSQL database on AWS, +watch Crossplane self-heal a manually changed resource, enforce security policy, +and change live infrastructure — all by updating YAML files. -In this tutorial, you'll deploy an application with a PostgreSQL database on -AWS, watch Crossplane self-heal a manually changed resource, enforce security -policy, and -change live infrastructure — all by updating YAML files. - -By the end of this tutorial, you'll be able to: +By the end of this tutorial, you can: - Deploy a composite resource that creates multiple AWS resources from a single manifest - Trigger drift detection and watch Crossplane correct an out-of-band change -- Block non-compliant requests using Kyverno before they reach Crossplane +- Block non-compliant requests with Kyverno before they reach Crossplane - Update live infrastructure by changing desired state ## Prerequisites -Install the following: +Install the following tools before starting: - [`kubectl`][kubectl-install] -- [AWS CLI][aws-cli], configured with credentials for an account you can create - resources in +- [AWS CLI][aws-cli], configured with credentials for an account where you can create resources - [kind][kind] -### Download the `up` CLI +### Install the up CLI -The Upbound `up` command-line enables interaction with Upbound control planes. +Install the `up` CLI via shell script: -Install the `up` command-line via shell, Homebrew or Linux package. 
+```shell +curl -sL "https://cli.upbound.io" | sh +``` - - -Install the latest version of the `up` command-line via shell script by -downloading the install script from [Upbound][upbound]. +If the script fails, download a specific version directly from [GitHub releases][up-cli-releases]. -:::tip -Shell install is the preferred method for installing the `up` command-line. -::: +Move the binary into your `PATH`: -The shell install script automatically determines the operating system and -platform architecture and installs the correct binary. +```shell +sudo mv up /usr/local/bin/ +``` + +If you don't have `sudo` access, install to a user-local directory instead: ```shell -curl -sL "https://cli.upbound.io" | sh +mkdir -p ~/.local/bin && mv up ~/.local/bin/ ``` -:::note -Install a specific version of `up` by providing the version. -For example, to install version use the following command: +Then add it to your `PATH` permanently by adding this line to your shell +profile (`~/.bashrc`, `~/.zshrc`, or equivalent): - -{`curl -sL "https://cli.upbound.io" | VERSION=v${versions.cli} sh`} - +```shell +export PATH="$HOME/.local/bin:$PATH" +``` -Find the full list of versions in the Up command-line repository. -::: +## Create the project - +### Create the project directory - -Upbound provides a Windows executable. +```bash +mkdir platform-demo +cd platform-demo +``` + +All commands from this point run from inside the `platform-demo` directory. - -{`curl.exe -sLo up.exe "https://cli.upbound.io/stable/v${versions.cli}/bin/windows_amd64/up.exe"`} - +### Create the project manifest -Find the full list of Windows versions in the [Up command-line -repository][win-versions]. +The `upbound.yaml` file declares the project and its provider and function +dependencies. `up project run --local` reads this file to determine what +packages to install into the cluster. 
+```bash +cat > upbound.yaml <<'EOF' +apiVersion: meta.dev.upbound.io/v2alpha1 +kind: Project +metadata: + name: app-w-db +spec: + apiDependencies: + - k8s: + version: v1.33.0 + type: k8s + dependsOn: + - apiVersion: pkg.crossplane.io/v1 + kind: Provider + # provider-family-aws installs shared config and authentication infrastructure. + package: xpkg.upbound.io/upbound/provider-family-aws + version: v2.4.0 + - apiVersion: pkg.crossplane.io/v1 + kind: Provider + # provider-aws-iam manages IAM roles and policies. + package: xpkg.upbound.io/upbound/provider-aws-iam + version: v2.4.0 + - apiVersion: pkg.crossplane.io/v1 + kind: Provider + # provider-aws-rds manages RDS instances and subnet groups. + package: xpkg.upbound.io/upbound/provider-aws-rds + version: v2.4.0 + - apiVersion: pkg.crossplane.io/v1 + kind: Provider + # provider-aws-ec2 manages VPCs and subnets. + package: xpkg.upbound.io/upbound/provider-aws-ec2 + version: v2.4.0 + - apiVersion: pkg.crossplane.io/v1beta1 + kind: Function + # function-auto-ready marks composed resources as ready automatically. + package: xpkg.upbound.io/crossplane-contrib/function-auto-ready + version: v0.6.1 + description: A Crossplane composition that provisions a web application with a + managed database (RDS), networking (VPC/Subnets), IAM role, and a Kubernetes Deployment. + license: Apache-2.0 + maintainer: Upbound User +EOF +``` - +### Define the platform APIs - -[Homebrew][homebrew] is a package manager for Linux and Mac OS. +The platform exposes two APIs: `AppWDB` (a basic app with a database) and +`AppWDBSecure` (the same API with an optional security context, used later for +policy enforcement). 
-Install the `up` command-line with a Homebrew `tap` using the command: +Create the API directory and XRD for `AppWDB`: -```shell -brew install upbound/tap/up +```bash +mkdir -p apis/appwdb +cat > apis/appwdb/definition.yaml <<'EOF' +apiVersion: apiextensions.crossplane.io/v2 +kind: CompositeResourceDefinition +metadata: + name: appwdbs.demo.upbound.io +spec: + group: demo.upbound.io + names: + categories: + - crossplane + kind: AppWDB + plural: appwdbs + scope: Namespaced + versions: + - name: v1alpha1 + referenceable: true + schema: + openAPIV3Schema: + description: AppWDB is the Schema for the AppWDB API. + properties: + spec: + description: AppWDBSpec defines the desired state of AppWDB. + type: object + properties: + parameters: + type: object + description: AppWDB configuration parameters + properties: + replicas: + type: integer + default: 2 + description: Number of app replicas + dbSize: + type: string + default: db.t3.micro + enum: + - db.t3.micro + - db.t3.small + - db.t3.medium + description: RDS instance class + region: + type: string + default: eu-central-1 + description: AWS region + required: + - parameters + status: + description: AppWDBStatus defines the observed state of AppWDB. + type: object + required: + - spec + type: object + served: true +EOF ``` - - -Upbound provides both `.deb` and `.rpm` packages for Linux platforms. +Create the XRD for `AppWDBSecure`: -Downloading packages requires both the [version][version] and CPU architecture -(`linux_amd64`, `linux_arm`, `linux_arm64`). 
+```bash +mkdir -p apis/appwdbsecure +cat > apis/appwdbsecure/definition.yaml <<'EOF' +apiVersion: apiextensions.crossplane.io/v2 +kind: CompositeResourceDefinition +metadata: + name: appwdbsecures.demo.upbound.io +spec: + group: demo.upbound.io + names: + categories: + - crossplane + kind: AppWDBSecure + plural: appwdbsecures + scope: Namespaced + versions: + - name: v1alpha1 + referenceable: true + schema: + openAPIV3Schema: + description: AppWDBSecure is the Schema for the AppWDBSecure API. + properties: + spec: + description: AppWDBSecureSpec defines the desired state of AppWDBSecure. + type: object + properties: + parameters: + type: object + description: AppWDBSecure configuration parameters + properties: + replicas: + type: integer + default: 2 + description: Number of app replicas + dbSize: + type: string + default: db.t3.micro + enum: + - db.t3.micro + - db.t3.small + - db.t3.medium + description: RDS instance class + region: + type: string + default: eu-central-1 + description: AWS region + securityContext: + type: object + description: Optional security context for the application container + properties: + privileged: + type: boolean + description: Run container as privileged. Blocked by platform policy. + required: + - parameters + status: + description: AppWDBSecureStatus defines the observed state of AppWDBSecure. + type: object + required: + - spec + type: object + served: true +EOF +``` -#### Debian package install +### Create the Compositions - -{`curl -sLo up.deb "https://cli.upbound.io/stable/v${versions.cli}/deb/up_${versions.cli}_linux_\${ARCH}.deb"`} - +Both APIs share the same composition function, `app-w-dbcompose-resources`, +which is the KCL function you create in the next step. 
- -#### RPM package install +```bash +cat > apis/appwdb/composition.yaml <<'EOF' +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + labels: + provider: aws + type: app-w-db + name: appwdbs.demo.upbound.io +spec: + compositeTypeRef: + apiVersion: demo.upbound.io/v1alpha1 + kind: AppWDB + mode: Pipeline + pipeline: + - step: compose-resources + functionRef: + name: app-w-dbcompose-resources + - step: automatically-detect-ready-composed-resources + functionRef: + name: crossplane-contrib-function-auto-ready +EOF + +cat > apis/appwdbsecure/composition.yaml <<'EOF' +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + labels: + provider: aws + type: app-w-db-secure + name: appwdbsecures.demo.upbound.io +spec: + compositeTypeRef: + apiVersion: demo.upbound.io/v1alpha1 + kind: AppWDBSecure + mode: Pipeline + pipeline: + - step: compose-resources + functionRef: + name: app-w-dbcompose-resources + - step: automatically-detect-ready-composed-resources + functionRef: + name: crossplane-contrib-function-auto-ready +EOF +``` - -{`curl -sLo up.rpm "https://cli.upbound.io/stable/v${versions.cli}/rpm/up_${versions.cli}_linux_\${ARCH}.rpm"`} - +### Create the composition function - - +The composition function is a KCL program that maps the user's 10-line request +to the full set of AWS resources. Create the function directory and package +manifest: -The `up` CLI allows you to interact with your control plane. +```bash +mkdir -p functions/compose-resources +cat > functions/compose-resources/kcl.mod <<'EOF' +[package] +name = "compose-resources" +version = "0.1.0" +EOF +``` -### Clone the demo repository +Create the composition logic in `main.k`. 
This is the entire implementation — +it reads from the composite resource and outputs every managed resource +Crossplane creates: ```bash -git clone https://github.com/tr0njavolta/platform-demo -cd platform-demo +cat > functions/compose-resources/main.k <<'EOF' +oxr = option("params").oxr +ocds = option("params").ocds + +params = oxr.spec.parameters +appName = oxr.metadata.name +region = params.region or "eu-central-1" +dbSize = params.dbSize or "db.t3.micro" +replicas = params.replicas or 2 + +_is_deleting = bool(oxr.metadata?.deletionTimestamp) +_db_key = "${appName}-db" +_instance_still_exists = _db_key in ocds + +_metadata = lambda name: str -> any { + { + namespace: oxr.metadata.namespace + annotations: {"krm.kcl.dev/composition-resource-name": name} + } +} + +_defaults = { + managementPolicies: ["*"] + providerConfigRef: {kind: "ProviderConfig", name: "default"} +} + +_subnets = [ + {cidrBlock: "10.0.1.0/24", availabilityZone: "${region}a", suffix: "a"} + {cidrBlock: "10.0.2.0/24", availabilityZone: "${region}b", suffix: "b"} + {cidrBlock: "10.0.3.0/24", availabilityZone: "${region}c", suffix: "c"} +] + +_sg_items = [{ + apiVersion: "rds.aws.m.upbound.io/v1beta1" + kind: "SubnetGroup" + metadata: _metadata("${appName}-subnet-group") | {name: "${appName}-subnet-group"} + spec: _defaults | { + forProvider: { + region: region + description: "${appName} DB subnet group" + subnetIdSelector: {matchControllerRef: True} + } + } +}] if not _is_deleting or _instance_still_exists else [] + +_db_items = [{ + apiVersion: "rds.aws.m.upbound.io/v1beta1" + kind: "Instance" + metadata: _metadata("${appName}-db") | { + name: "${appName}-db" + annotations: {"crossplane.io/external-name": "${appName}-db"} + } + spec: _defaults | { + forProvider: { + region: region + identifier: "${appName}-db" + engine: "postgres" + engineVersion: "16.6" + instanceClass: dbSize + username: "demoadmin" + dbName: "appdb" + autoGeneratePassword: True + passwordSecretRef: {name: 
"${appName}-db-password", key: "password"} + applyImmediately: True + skipFinalSnapshot: True + allocatedStorage: 20 + storageType: "gp3" + storageEncrypted: False + publiclyAccessible: False + backupRetentionPeriod: 0 + dbSubnetGroupNameSelector: {matchControllerRef: True} + } + initProvider: {identifier: "${appName}-db"} + } +}] if not _is_deleting else [] + +_items = [ + { + apiVersion: "ec2.aws.m.upbound.io/v1beta1" + kind: "VPC" + metadata: _metadata("${appName}-vpc") | {name: "${appName}-vpc"} + spec: _defaults | { + forProvider: { + region: region + cidrBlock: "10.0.0.0/16" + enableDnsHostnames: True + enableDnsSupport: True + tags: {"Name": "${appName}-vpc"} + } + } + } +] + [ + { + apiVersion: "ec2.aws.m.upbound.io/v1beta1" + kind: "Subnet" + metadata: _metadata("${appName}-subnet-${s.suffix}") | {name: "${appName}-subnet-${s.suffix}"} + spec: _defaults | { + forProvider: { + region: region + cidrBlock: s.cidrBlock + availabilityZone: s.availabilityZone + vpcIdSelector: {matchControllerRef: True} + tags: {"Name": "${appName}-subnet-${s.suffix}"} + } + } + } for s in _subnets +] + _sg_items + _db_items + [ + { + apiVersion: "iam.aws.m.upbound.io/v1beta1" + kind: "Role" + metadata: _metadata("${appName}-role") | {name: "${appName}-role"} + spec: _defaults | { + forProvider: { + assumeRolePolicy: '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole"}]}' + } + } + } + { + apiVersion: "apps/v1" + kind: "Deployment" + metadata: _metadata("${appName}-deployment") | {name: appName} + spec: { + replicas: replicas + selector: {matchLabels: {app: appName}} + template: { + metadata: {labels: {app: appName}} + spec: { + containers: [ + { + name: "app" + image: "public.ecr.aws/nginx/nginx:stable-alpine" + ports: [{containerPort: 80}] + } | ({securityContext: {privileged: params.securityContext.privileged}} if params?.securityContext?.privileged != None else {}) + ] + } + } + } + } +] + +items = 
_items +EOF ``` -All file paths in this tutorial are relative to the root of that repository. +### Create the ProviderConfig -### Set up a control plane +The `ProviderConfig` tells the AWS providers where to find credentials. Create +it now — you apply it after providers are healthy. -You need a Kubernetes cluster with Upbound Crossplane (UXP) installed and connected -to the Upbound Console. +```bash +mkdir -p setup/config +cat > setup/config/aws-provider-config.yaml <<'EOF' +apiVersion: aws.m.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: default + namespace: demo +spec: + credentials: + source: Secret + secretRef: + namespace: demo + name: aws-secret + key: creds +EOF +``` -1. Create a cluster. This tutorial uses `kind` to create a local cluster: +## Configure AWS credentials - ```bash - kind create cluster --name upbound-demo - ``` +The demo creates real AWS resources. You need credentials with permissions to +create VPCs, subnets, IAM roles, and RDS instances. - Create a namespace for the demo: +Export your credentials: - ```shell - kubectl create namespace demo - ``` +```bash +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +``` -2. Install UXP: +## Start the project - ```bash - up uxp install - ``` +Open a dedicated terminal window and run from inside the `platform-demo` directory: -3. Connect the cluster to the Upbound Console by following the - [connect a control plane][connect-ctp] guide. +```bash +up project run --local +``` -### Install the AWS providers +Leave this terminal running for the duration of the tutorial. This command: -The demo uses three AWS providers: EC2, RDS, and IAM. The required packages and -versions are declared in `upbound.yaml`. 
Install them directly: +- Creates a kind cluster named `up-app-w-db` (the default name for `up project run --local`) +- Installs UXP into the cluster +- Builds and deploys the KCL composition function +- Installs the AWS providers declared in `upbound.yaml` +- Applies the XRDs and Compositions from `apis/` -1. Apply the provider packages: +Startup takes several minutes. Once the command prints output confirming the +cluster is created and providers are installing, open a second terminal, +`cd` into the `platform-demo` directory, and continue with the steps below. - ```bash - kubectl apply -f - <<'EOF' - apiVersion: pkg.crossplane.io/v1 - kind: Provider - metadata: - name: provider-family-aws - spec: - package: xpkg.upbound.io/upbound/provider-family-aws:v2.4.0 - --- - apiVersion: pkg.crossplane.io/v1 - kind: Provider - metadata: - name: provider-aws-iam - spec: - package: xpkg.upbound.io/upbound/provider-aws-iam:v2.4.0 - --- - apiVersion: pkg.crossplane.io/v1 - kind: Provider - metadata: - name: provider-aws-rds - spec: - package: xpkg.upbound.io/upbound/provider-aws-rds:v2.4.0 - --- - apiVersion: pkg.crossplane.io/v1 - kind: Provider - metadata: - name: provider-aws-ec2 - spec: - package: xpkg.upbound.io/upbound/provider-aws-ec2:v2.4.0 - --- - apiVersion: pkg.crossplane.io/v1beta1 - kind: Function - metadata: - name: function-auto-ready - spec: - package: xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.6.1 - EOF - ``` +:::warning +`up project run --local` may print `traces export: context deadline exceeded` +in stderr. This is a non-fatal telemetry export error — it does not mean +provisioning failed. Check whether providers were actually installed by running +`kubectl get providers` in the second terminal. If providers appear, continue. -2. Wait for all providers to become healthy: +If `up project run --local` exits non-zero AND `kubectl get providers` returns +**No resources found**, provisioning did fail. 
Run +`kind delete cluster --name up-app-w-db` and restart from this step. Verify +your network allows outbound connections to `xpkg.upbound.io` on port 443. +::: - ```bash - kubectl get providers - ``` +### Configure kubectl - All providers should show `HEALTHY: True` before continuing. +Once `up project run --local` has created the cluster, point kubectl at it. +Run this in your second terminal from inside the `platform-demo` directory: -### Configure AWS credentials +```bash +kind get kubeconfig --name up-app-w-db > ~/.kube/config +``` + +:::warning +This overwrites your existing `~/.kube/config`. To preserve your existing +contexts, use `kind get kubeconfig --name up-app-w-db > ~/.kube/config-upbound` +and then merge: `KUBECONFIG=~/.kube/config:~/.kube/config-upbound kubectl +config view --flatten > ~/.kube/config.merged && mv ~/.kube/config.merged ~/.kube/config` +::: -1. Create a Kubernetes secret with your AWS credentials: +Verify the connection: + +```bash +kubectl get nodes +``` + +### Apply AWS credentials + +1. Create the demo namespace: + + ```bash + kubectl create namespace demo + ``` + +2. Create a Kubernetes secret with your AWS credentials: ```bash kubectl create secret generic aws-secret \ @@ -211,45 +575,73 @@ versions are declared in `upbound.yaml`. Install them directly: "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY")" ``` -2. Apply the `ProviderConfig` that references those credentials: +### Verify the setup - ```bash - kubectl apply -f setup/config/ - ``` +Check that providers are installed and healthy: -3. Verify the `ProviderConfig` is present: +```bash +kubectl get providers +``` - ```bash - kubectl get providerconfigs.aws.m.upbound.io default -n demo - ``` +All providers should show `HEALTHY: True`. Keep running this command until all +show `HEALTHY: True` before continuing. -### Install the platform APIs +:::warning +If this command returns **No resources found**, `up project run --local` did +not complete successfully. 
Check that terminal for errors. An empty list means +provisioning failed, not that it's still in progress. Delete the cluster with +`kind delete cluster --name up-app-w-db` and restart. +::: -Apply the XRDs and Compositions from the demo repository: +Check that the composition function is healthy: ```bash -kubectl apply -f apis/ +kubectl get functions ``` +The function should show `HEALTHY: True`. + +:::warning +If this returns **No resources found**, the KCL function was not built or +deployed. Check the `up project run` terminal and restart. +::: + Verify the APIs are established: ```bash -kubectl get xrd +kubectl get xrds ``` -All XRDs should show `ESTABLISHED: True`. +Both XRDs should show `ESTABLISHED: True` before continuing. + +:::warning +If this returns **No resources found**, stop here. No subsequent step will +work without the XRDs installed. Return to the `up project run` terminal to +diagnose the failure. +::: + +### Apply the ProviderConfig + +The `ProviderConfig` CRD is registered by the AWS provider. Apply it only after +providers are healthy: + +```bash +kubectl apply -f setup/config/ +``` :::info -AWS resource provisioning — especially RDS — takes 5–8 minutes. Each part of this -tutorial is structured so you can keep reading while AWS works. +AWS resource provisioning — especially RDS — takes 5–8 minutes. Each section +of this tutorial is structured so you can keep reading while AWS works. ::: ## Deploy an app with a database +The end-user interface for this platform is a 10-line manifest. A developer +fills in three fields: replica count, database size, and AWS region. The VPC, +subnets, IAM role, and RDS configuration are abstracted away. -Open `examples/appwdb/example.yaml`: - -```yaml +```bash +kubectl apply -f - <<'EOF' apiVersion: demo.upbound.io/v1alpha1 kind: AppWDB metadata: @@ -260,119 +652,65 @@ spec: replicas: 2 dbSize: db.t3.micro region: eu-central-1 +EOF ``` -This is the entire end-user interface. 
A developer fills in three fields: replica -count, database size, and AWS region. They don't see the VPC, subnets, IAM role, -or RDS configuration behind it. - -### Deploy it +Check the composite resource status: -1. Apply the manifest: - - ```bash - kubectl apply -f examples/appwdb/example.yaml - ``` - -2. In the Upbound Console, click **View all Composite Resources**. You should see - `demo-01` listed with Crossplane actively reconciling it. +```bash +kubectl get appwdb demo-01 -n demo +``` -That 10-line file expands into: +That 10-line manifest creates: - VPC + 3 subnets (eu-central-1a, b, c) - RDS subnet group + PostgreSQL instance (gp3 storage) - IAM role - Kubernetes `Deployment` scaled to `replicas: 2` -3. Open the AWS Console and set your region to **eu-central-1**. Check: - - - [IAM Roles](https://us-east-1.console.aws.amazon.com/iam/home#/roles) — look for `demo-01-role` - - [VPCs](https://eu-central-1.console.aws.amazon.com/vpcconsole/home#vpcs:) — look for `demo-01-vpc` - - [RDS Databases](https://eu-central-1.console.aws.amazon.com/rds/home#databases:) — watch for `demo-01-db` (takes 5–8 minutes) - - -4. Open `apis/appwdb/definition.yaml`. +Open the AWS Console and set your region to **eu-central-1**. Check: +- **IAM → Roles** — look for `demo-01-role` +- **VPC → Your VPCs** — look for `demo-01-vpc` +- **RDS → Databases** — watch for `demo-01-db` (takes 5–8 minutes) - This is the XRD — it defines what end users can request. Notice `dbSize` is an enum, - not a free-text field. Users can't request a size the platform doesn't support. +Verify the `Deployment` came up: -5. Open `apis/appwdb/composition.yaml`. - - This is the Composition — the mapping from those 10 lines to the full set of AWS - resources. It calls a function written in KCL. You can also write Composition - functions in [Go][fn-go], [Python][fn-python], or [Go Templating][fn-go-template], - and mix languages within a single pipeline. - -6. Open `functions/compose-resources/main.k`. 
- - This is the logic layer. It reads `dbSize` and `replicas` from the composite - resource and outputs every managed resource Crossplane will create. - - -7. Check the composite resource status: - - ```bash - kubectl get appwdb demo-01 -n demo - ``` - -8. Verify the `Deployment` came up (faster than RDS, since it's just a container): - - ```bash - kubectl get pods -n demo - ``` - -9. Describe a pod to confirm it's running. Replace `` with the name from - the previous output: - - ```bash - kubectl describe pod -n demo - ``` - -10. In the Upbound Console, click into `demo-01` and open the **relationship view** - to see the full resource tree and sync status for each composed resource. - - :::info - `demo-01-db` takes a few minutes to reach `SYNCED: True`. Continue to the - next section - while AWS finishes provisioning. - ::: +```bash +kubectl get pods -n demo +``` -## Providers and ProviderConfigs +### Explore the composition -**Providers** are Kubernetes controllers that know how to create, update, and delete -resources in a specific cloud service — EC2, RDS, IAM, and so on. In Crossplane 2.0, -the Kubernetes `Deployment` is managed natively without a separate provider. +Open `apis/appwdb/definition.yaml`. -**ProviderConfigs** tell those providers how to authenticate. The tutorial uses static -credentials, but production deployments can use OIDC, IRSA, Workload Identity, and -other methods depending on the provider. See [provider authentication][auth-docs]. +This is the XRD — it defines what end users can request. The `dbSize` field is +an enum, not a free-text field. Users can't request a size the platform doesn't +support. -### Verify the providers +Open `apis/appwdb/composition.yaml`. -```bash -kubectl get providers -kubectl get providerconfigs.aws.m.upbound.io default -n demo -``` +This is the Composition — the mapping from those 10 lines to the full set of AWS +resources. It calls the KCL function you created. 
You can also write Composition +functions in [Go][fn-go], [Python][fn-python], or [Go Templating][fn-go-template], +and mix languages within a single pipeline. -All providers should be `HEALTHY: True`. The `default` ProviderConfig is what connects -them to the AWS account where `demo-01-db` is provisioning. +Open `functions/compose-resources/main.k`. -In the Upbound Console, navigate to `demo-01` and open the **relationship view**. You'll -see all composed resources — VPC, subnets, RDS instance, IAM role, and `Deployment` — -with their sync status and how they connect. +This is the logic layer. It reads `dbSize` and `replicas` from the composite +resource and outputs every managed resource Crossplane creates. ## Drift detection -Crossplane never stops watching. If someone changes a resource directly in AWS, Crossplane -detects the difference between desired state and actual state and corrects it. This is -**drift detection**. +Crossplane never stops watching. If someone changes a resource directly in AWS, +Crossplane detects the difference between desired state and actual state and +corrects it. This is drift detection. -### Confirm the VPC is ready +### Trigger drift -1. Verify the VPC is running: +1. Verify the VPC is ready: ```bash - kubectl get vpcs.ec2.aws.m.upbound.io demo-01-vpc -n demo + kubectl get vpcs.ec2.aws.m.upbound.io demo-01-vpc ``` Wait until `SYNCED: True`. @@ -385,7 +723,7 @@ detects the difference between desired state and actual state and corrects it. T 4. Tell Crossplane to reconcile immediately instead of waiting for the next loop: ```bash - kubectl annotate vpcs.ec2.aws.m.upbound.io demo-01-vpc -n demo \ + kubectl annotate vpcs.ec2.aws.m.upbound.io demo-01-vpc \ reconcile.crossplane.io/trigger="$(date)" \ --overwrite ``` @@ -393,151 +731,293 @@ detects the difference between desired state and actual state and corrects it. T 5. 
Watch the sync status: ```bash - kubectl get vpcs.ec2.aws.m.upbound.io demo-01-vpc -n demo -w \ + kubectl get vpcs.ec2.aws.m.upbound.io demo-01-vpc -w \ -o custom-columns='NAME:.metadata.name,SYNCED:.status.conditions[?(@.type=="Synced")].reason' ``` 6. Switch to the AWS Console and watch the Name tag snap back to `demo-01-vpc`. -7. Verify the reconciliation: +### Verify recovery - ```bash - kubectl get appwdb demo-01 -n demo - ``` +```bash +kubectl get appwdb demo-01 -n demo +``` - `SYNCED: True` confirms the control plane corrected the drift. +`SYNCED: True` confirms the control plane corrected the drift. ## Add policy enforcement -**Kyverno** is a policy engine that intercepts Kubernetes admission requests before +Kyverno is a policy engine that intercepts Kubernetes admission requests before they're accepted. A policy violation is blocked before Crossplane runs — nothing reaches AWS. +### Install Kyverno + +1. Create the Kyverno add-on manifest: + + ```bash + mkdir -p w-kyverno + cat > w-kyverno/addon-kyverno.yaml <<'EOF' + apiVersion: pkg.upbound.io/v1beta1 + kind: AddOn + metadata: + name: upbound-addon-kyverno + spec: + package: xpkg.upbound.io/upbound/addon-kyverno:3.7.0 + EOF + ``` -1. Apply the Kyverno add-on from the Upbound Marketplace: +2. Apply it: ```bash kubectl apply -f w-kyverno/addon-kyverno.yaml ``` -2. In the Upbound Console, select **AddOns** in the left navigation. Wait for - `upbound-addon-kyverno` to become healthy (~2 minutes). +3. Wait for Kyverno to become healthy (~2 minutes): + ```bash + kubectl get addons.pkg.upbound.io upbound-addon-kyverno -w + ``` + + Watch the output. You'll see `INSTALLED: True` appear first, then + `HEALTHY: True` once the webhook is running (~2–3 minutes). Press Ctrl+C + once `HEALTHY: True` appears. 
+ + If it stays `HEALTHY: False` after 5 minutes, check + `kubectl describe addons.pkg.upbound.io upbound-addon-kyverno` for events + and verify the UXP installation is healthy with `kubectl get pods -n upbound-system`. + +4. Create the no-privileged-containers policy: + + ```bash + cat > w-kyverno/policy-no-privileged.yaml <<'EOF' + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: disallow-privileged-containers + annotations: + policies.kyverno.io/title: Disallow Privileged Containers + policies.kyverno.io/category: Pod Security + policies.kyverno.io/severity: high + policies.kyverno.io/description: >- + Privileged containers have unrestricted access to the host system. + This policy blocks any AppWDBSecure request with securityContext.privileged: true + before Crossplane composes any resources — nothing reaches AWS. + Applies to all requests — human, GitOps, or AI agent. + spec: + validationFailureAction: Enforce + background: false + rules: + - name: no-privileged-platform-api + match: + any: + - resources: + kinds: + - AppWDBSecure + validate: + message: "Privileged containers are not allowed on this platform. Remove securityContext.privileged: true from your request." + pattern: + spec: + parameters: + =(securityContext): + =(privileged): "false" + - name: no-privileged-deployment + match: + any: + - resources: + kinds: + - Deployment + validate: + message: "Privileged containers are not allowed on this platform. Remove securityContext.privileged: true from your request." + pattern: + spec: + template: + spec: + containers: + - =(securityContext): + =(privileged): "false" + EOF + ``` -3. Apply the policy: +5. Apply the policy: ```bash kubectl apply -f w-kyverno/policy-no-privileged.yaml - kubectl get clusterpolicy ``` - `READY: True` means the policy is active. `disallow-privileged-containers` rejects - any `AppWDBSecure` request where `securityContext.privileged` is `true` — at - admission time, before Crossplane sees it. 
+ You may see this warning: + + ``` + Warning: the kind defined in the all match resource is invalid: unable to convert GVK to GVR for kinds AppWDBSecure, err: resource not found + ``` + + This is expected if the XRDs were recently established and doesn't prevent + the policy from enforcing once the CRD is ready. + +6. Verify the policy is active: + + ```bash + kubectl get clusterpolicy disallow-privileged-containers + ``` + + Expected output: + + ``` + NAME ADMISSION BACKGROUND READY AGE MESSAGE + disallow-privileged-containers true false True ... Ready + ``` + + `READY: True` means the policy is enforcing. `BACKGROUND: false` is expected + — this policy operates at admission time only, not as a background scan. +### Block a privileged request -4. Next, trigger a policy violation. Open `examples/appwdbsecure/example-1.yaml`. It has `securityContext.privileged: true`. +:::warning +Kyverno can only evaluate requests for resource types whose CRDs are already +installed. If you see `no matches for kind "AppWDBSecure"` when running the +next command, the XRDs are not installed. Return to the setup section and +confirm that `kubectl get xrds` shows both XRDs as `ESTABLISHED: True` before +continuing. +::: -5. Try to apply it: +1. Try to apply a privileged request: ```bash - kubectl apply -f examples/appwdbsecure/example-1.yaml + kubectl apply -f - <<'EOF' + apiVersion: demo.upbound.io/v1alpha1 + kind: AppWDBSecure + metadata: + name: kyverno-demo-01 + namespace: demo + spec: + parameters: + replicas: 2 + dbSize: db.t3.micro + region: eu-central-1 + securityContext: + privileged: true + EOF ``` - The request is blocked immediately. The error message tells you exactly which policy - caught it. Nothing was created. + The request is blocked immediately by Kyverno. The error references + `disallow-privileged-containers`. Nothing was created. :::info - `demo-01` — deployed before Kyverno was installed — has a running RDS instance - right now. This request didn't start at all. 
+ `demo-01` — deployed before Kyverno was installed — has a running RDS + instance right now. This request didn't start at all. ::: -6. Apply the compliant request: +### Apply a compliant request + +1. Apply the compliant request: ```bash - kubectl apply -f examples/appwdbsecure/example-2.yaml - kubectl get appwdbsecure -n demo -w + kubectl apply -f - <<'EOF' + apiVersion: demo.upbound.io/v1alpha1 + kind: AppWDBSecure + metadata: + name: kyverno-demo-01 + namespace: demo + spec: + parameters: + replicas: 2 + dbSize: db.t3.micro + region: eu-central-1 + securityContext: + privileged: false + EOF ``` - `privileged: false` passes the policy check and starts provisioning. This takes - ~10 minutes. + `privileged: false` passes the policy check and starts provisioning. This + takes approximately 10 minutes. -7. Verify the policy enforcement: +2. Watch the status: ```bash - kubectl get clusterpolicy disallow-privileged-containers + kubectl get appwdbsecure -n demo -w ``` - `READY: True` confirms the policy is enforcing. - ## Change it live -To change infrastructure, update the desired state. Crossplane figures out what needs -to change and does it. +To change infrastructure, update the desired state. Crossplane figures out what +needs to change and does it. - -Scale the database +### Scale the database 1. Apply the change: ```bash - kubectl apply -f examples/appwdb/variant-bigger-db.yaml + kubectl apply -f - <<'EOF' + apiVersion: demo.upbound.io/v1alpha1 + kind: AppWDB + metadata: + name: demo-01 + namespace: demo + spec: + parameters: + replicas: 2 + dbSize: db.t3.medium + region: eu-central-1 + EOF ``` -2. Watch the status. `DESIRED` updates immediately; `ACTUAL` updates once AWS finishes - (~5 minutes): +2. Watch the status. 
`DESIRED` updates immediately; `ACTUAL` updates once AWS + finishes (~5 minutes): ```bash - kubectl get instance.rds.aws.m.upbound.io demo-01-db -n demo -w \ + kubectl get instances.rds.aws.m.upbound.io demo-01-db -w \ -o custom-columns='NAME:.metadata.name,DESIRED:.spec.forProvider.instanceClass,ACTUAL:.status.atProvider.instanceClass,SYNCED:.status.conditions[?(@.type=="Synced")].reason' ``` 3. In the AWS Console, check the **Status** and **Size** columns for `demo-01-db`. -4. In the Upbound Console, navigate to `demo-01` and open the **relationship view** - to see the updated resource tree. - -5. Confirm the change took effect: +4. Confirm the change took effect: ```bash kubectl get appwdb demo-01 -n demo ``` - `SYNCED: True` with your updated `dbSize` or `replicas` means you're done. + `SYNCED: True` with your updated `dbSize` means the change applied. ## Clean up Delete the composite resources. Crossplane deletes all composed AWS resources -(RDS instance, VPC, subnets, IAM role) before the composite resource is removed. +before removing the composite resource. ```shell kubectl delete appwdbsecure kyverno-demo-01 -n demo kubectl delete appwdb demo-01 -n demo ``` -RDS deletion takes 5–10 minutes. Wait until both resources are fully removed before -deleting the cluster: +RDS deletion takes 5–10 minutes. 
Wait until both resources are fully removed: + +```shell +kubectl get appwdb -n demo -w +``` ```shell -kubectl get appwdb,appwdbsecure -n demo -w +kubectl get appwdbsecure -n demo -w ``` -Once both are gone, delete the kind cluster: +Once both are gone, stop `up project run` with Ctrl+C in that terminal, then +delete the cluster: ```shell -kind delete cluster --name upbound-demo +kind delete cluster --name up-app-w-db ``` ## Next steps In this tutorial, you: -- Deployed a composite resource that created a VPC, subnets, IAM role, RDS instance, - and Kubernetes `Deployment` from a 10-line manifest +- Created a Crossplane project with XRDs, Compositions, and a KCL function +- Deployed a composite resource that created a VPC, subnets, IAM role, RDS + instance, and Kubernetes `Deployment` from a 10-line manifest - Watched Crossplane detect and correct an out-of-band change to a VPC tag - Blocked a privileged container request with Kyverno before it reached the cluster -- Updated live infrastructure by changing a field in desired state +- Updated live infrastructure by changing desired state +Continue with: - [Composite Resource Definitions][xrd-concept] — design your own platform APIs - [Composition functions][fn-docs] — write the logic that maps user requests to resources @@ -545,9 +1025,9 @@ In this tutorial, you: - [Upbound Marketplace][marketplace] — providers and add-ons for AWS, Azure, GCP, and more [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ -[up-cli]: /manuals/cli/overview +[up-cli-releases]: https://github.com/upbound/up/releases [aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html -[connect-ctp]: {link-to-connect-control-plane-guide} +[kind]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation [fn-go]: /manuals/cli/howtos/compositions/go/ [fn-python]: /manuals/cli/howtos/compositions/python/ [fn-go-template]: /manuals/cli/howtos/compositions/go-template/ From 83fde7e63578aadd402c4d8209a9bc0fdf77c970 
Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Mon, 4 May 2026 11:53:11 -0400 Subject: [PATCH 06/12] update titles, step order --- docs/getstarted/ai-controller-tutorial.md | 301 +++++---- .../ai-database-scaling-tutorial.md | 535 ++++++++++++++++ ...plane-tutorial.md => platform-tutorial.md} | 592 +++++++++++------- 3 files changed, 1032 insertions(+), 396 deletions(-) create mode 100644 docs/getstarted/ai-database-scaling-tutorial.md rename docs/getstarted/{crossplane-tutorial.md => platform-tutorial.md} (67%) diff --git a/docs/getstarted/ai-controller-tutorial.md b/docs/getstarted/ai-controller-tutorial.md index 2c00527e..fe7bd103 100644 --- a/docs/getstarted/ai-controller-tutorial.md +++ b/docs/getstarted/ai-controller-tutorial.md @@ -19,7 +19,7 @@ manifest. Crossplane applies it. By the end of this tutorial, you can: -- Run a Crossplane `WatchOperation` that calls a local LLM +- Deploy a `WatchOperation` that calls a local LLM on every resource change - Watch the controller detect and correct a policy violation automatically - Update the enforcement rule by editing a single field in YAML @@ -48,18 +48,16 @@ Move the binary into your `PATH`: sudo mv up /usr/local/bin/ ``` -If you don't have `sudo` access, install to a user-local directory instead: +If you don't have `sudo` access: ```shell mkdir -p ~/.local/bin && mv up ~/.local/bin/ -``` - -Then add it to your shell profile (`~/.bashrc`, `~/.zshrc`, or equivalent): - -```shell export PATH="$HOME/.local/bin:$PATH" ``` +Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, or +equivalent) to make it permanent. + Verify the installation: ```shell @@ -79,10 +77,6 @@ All commands from this point run from inside the `english-controller` directory. ### Create the project manifest -The `upbound.yaml` file declares the project and its function dependencies. -`up project run --local` reads this file to know which packages to install into -the cluster. 
Create it with: - ```bash cat > upbound.yaml <<'EOF' apiVersion: meta.dev.upbound.io/v2alpha1 @@ -93,23 +87,22 @@ spec: dependsOn: - apiVersion: pkg.crossplane.io/v1 kind: Function - # function-auto-ready marks composed resources as ready automatically; - # required by Crossplane's composition machinery even when not used directly. package: xpkg.upbound.io/crossplane-contrib/function-auto-ready version: '>=v0.0.0' - apiVersion: pkg.crossplane.io/v1 kind: Function - # function-openai is the function the WatchOperation calls to reach the LLM. package: xpkg.upbound.io/upbound/function-openai version: v0.3.0 description: A Kubernetes controller whose enforcement logic is written in plain English. EOF ``` -### Create the `WatchOperation` +### Create the WatchOperation -The `WatchOperation` is the controller — it defines what to watch and what -function to call when the watched resource changes. +The `WatchOperation` is the controller. It watches the nginx `Deployment` and +calls `upbound-function-openai` whenever it changes. The function sends the +current resource state to the LLM along with the `systemPrompt` rule. The LLM +returns a corrected manifest. Crossplane applies it. ```bash mkdir -p operations/replicas @@ -155,12 +148,41 @@ EOF ``` :::info -With a larger model like `gpt-4o` or `gpt-oss:20b`, the `systemPrompt` can be -much simpler — just the rule itself, without the output format instructions. -The explicit YAML output guidance in `userPrompt` is needed specifically for -`qwen2.5:1.5b`. +The explicit output instructions in `userPrompt` are needed for `qwen2.5:1.5b`. +With a larger model like `gpt-4o`, the `systemPrompt` can be much simpler — just +the rule itself, without format guidance. ::: +### Create the nginx deployment + +Create the starting state — 1 replica. The AI controller will correct this. 
+ +```bash +mkdir -p examples +cat > examples/deployment.yaml <<'EOF' +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx +EOF +``` + ## Set up Ollama Ollama runs the LLM locally. Install it and pull the model before starting the @@ -178,18 +200,11 @@ If the install script doesn't work for your OS, download directly from ### Start Ollama On Linux, the install script registers a systemd service that starts Ollama -automatically. On macOS, Ollama may not start automatically after installation. -If `ollama list` returns "could not connect to ollama server", start it manually -in a separate terminal before continuing: - -``` -ollama serve -``` - -Verify it's ready: +automatically. On macOS, start it manually in a separate terminal if +`ollama list` returns "could not connect to ollama server": ```shell -ollama list +ollama serve ``` ### Pull the model @@ -208,40 +223,37 @@ You should see `qwen2.5:1.5b` in the output. ## Start the project -Run `up project run --local` from inside the `english-controller` directory. -This command creates a kind cluster, installs UXP, and deploys all packages and -APIs defined in the project. It exits when the cluster is ready. +Run from inside the `english-controller` directory: ```bash up project run --local --control-plane-version=2.1.4-up.2 ``` -The `--control-plane-version` flag pins the UXP version installed into the kind -cluster. This tutorial was tested with `2.1.4-up.2`. If you need a different -version, find available version strings in the [UXP release notes][uxp-releases]. - -This takes several minutes on first run — it pulls provider packages and sets up -the cluster. Subsequent runs are faster. +This creates a kind cluster, installs UXP, and deploys the function packages +declared in `upbound.yaml`. 
It exits when the cluster is ready. :::warning If `up project run --local` exits non-zero and prints `traces export: context -deadline exceeded`, check whether providers were installed: +deadline exceeded`, check whether functions were installed: + +```bash +kubectl get functions +``` + +If functions appear, provisioning succeeded despite the telemetry error. +If the list is empty, delete the cluster and retry: ```bash -kubectl get providers +kind delete cluster --name up-english-controller ``` -If providers appear, provisioning succeeded despite the telemetry error. -If the list is empty, provisioning failed. Run -`kind delete cluster --name up-app-w-db` and retry. Verify your network -allows outbound connections to `xpkg.upbound.io` on port 443. +Verify your network allows outbound connections to `xpkg.upbound.io` on port 443. ::: -Once the command completes, set your kubeconfig. `up project run --local` names -the kind cluster `up-app-w-db` by default: +### Configure kubectl ```bash -kind get kubeconfig --name up-app-w-db > ~/.kube/config +kind get kubeconfig --name up-english-controller > ~/.kube/config ``` :::warning @@ -249,7 +261,7 @@ This overwrites your existing `~/.kube/config`. To preserve existing contexts, merge instead: ```bash -kind get kubeconfig --name up-app-w-db > ~/.kube/config-upbound +kind get kubeconfig --name up-english-controller > ~/.kube/config-upbound KUBECONFIG=~/.kube/config:~/.kube/config-upbound \ kubectl config view --flatten > ~/.kube/config.merged mv ~/.kube/config.merged ~/.kube/config @@ -262,11 +274,10 @@ Verify the connection: kubectl get nodes ``` -## Wire Ollama into the cluster +### Wire Ollama into the cluster -The kind cluster's pods need to reach Ollama running on your host. This step -creates a Kubernetes `Service` and `Endpoints` resource that route cluster -traffic to your host machine. +The kind cluster's pods need to reach Ollama running on your host. 
Create a +Kubernetes `Service` and `Endpoints` that route cluster traffic to your machine. 1. Get the host IP on the kind bridge network: @@ -329,9 +340,10 @@ traffic to your host machine. EOF ``` - The `OPENAI_BASE_URL` points to Ollama's OpenAI-compatible API. To switch - to a cloud model, replace this URL and update `OPENAI_API_KEY` and - `OPENAI_MODEL` — the `WatchOperation` works identically. + The `OPENAI_BASE_URL` points to Ollama's OpenAI-compatible API. To switch to + a cloud model, replace `OPENAI_BASE_URL` with `https://api.openai.com/v1`, + set `OPENAI_API_KEY` to your API key, and update `OPENAI_MODEL`. The + `WatchOperation` works identically regardless of which model runs. ### Verify the setup @@ -345,39 +357,17 @@ Wait until `upbound-function-openai` shows `HEALTHY: True`. :::warning If `kubectl get functions` returns **No resources found**, `up project run ---local` did not complete successfully. Check the output from that command, -delete the cluster with `kind delete cluster --name up-app-w-db`, and restart -from the [Start the project](#start-the-project) step. +--local` did not complete successfully. Delete the cluster with +`kind delete cluster --name up-english-controller` and restart from +[Start the project](#start-the-project). ::: ### Apply the starting state -Apply the nginx `Deployment` at 1 replica — the AI controller will correct -this: +Apply the nginx `Deployment` at 1 replica: ```bash -kubectl apply -f - <<'EOF' -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: nginx - name: nginx - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - image: nginx - name: nginx -EOF +kubectl apply -f examples/deployment.yaml ``` Verify it's running: @@ -388,67 +378,56 @@ kubectl get deployment nginx You should see `READY: 1/1`. 
-## Part 1: Run the AI controller +## Run the AI controller -A Crossplane `WatchOperation` fires every time a specific resource changes. -Here, it watches the nginx `Deployment` in the `default` namespace. When it -fires, it calls `upbound-function-openai`, which sends the current state to -the LLM along with the rule in `systemPrompt`. The LLM returns a corrected -manifest. Crossplane applies it. +An nginx `Deployment` is running in the cluster with only 1 replica. Apply the +`WatchOperation` and watch it fix that. -### Apply the WatchOperation +### See the current state ```bash -kubectl apply -f operations/replicas/operation.yaml +kubectl get deployment nginx ``` -The `WatchOperation` fires immediately because the `Deployment` exists. +`READY 1/1` is the starting point. -### Watch it act - -```bash -kubectl get deployment nginx -w -``` +### Apply the WatchOperation -Within 60–90 seconds, replicas jump from 1 to 3. The LLM read the -`Deployment`, decided it violated the rule, and patched it. +Crossplane Operations are Kubernetes objects that run logic against your cluster +on a trigger. There are three kinds: -Press Ctrl+C when the replicas reach 3. +| Kind | Trigger | +|------|---------| +| `WatchOperation` | Every time a specific resource changes | +| `CronOperation` | On a schedule | +| `Operation` | Once, on demand | -### Explore the controller +This tutorial uses a `WatchOperation`. It watches the nginx `Deployment` and +calls an LLM every time it changes. -Open `operations/replicas/operation.yaml`. That file is the entire controller. +```bash +kubectl apply -f operations/replicas/operation.yaml +``` -The `systemPrompt` is the reconciliation logic: +The `WatchOperation` fires immediately because the `Deployment` already exists. -```text -systemPrompt: |- - You are a Kubernetes controller. Output raw YAML only. +### Watch it act - Rule: if spec.replicas is less than 3, set it to 3. Otherwise keep it unchanged. 
+```bash +kubectl get deployment nginx -w ``` -The `watch` block defines the trigger: +Within 60–90 seconds, replicas jump from 1 to 3. The LLM read the `Deployment`, +decided it violated the rule, and patched it. -```text -watch: - apiVersion: apps/v1 - kind: Deployment - namespace: default -``` - -Every time any `Deployment` in `default` changes, the operation fires. +Press Ctrl+C when replicas reach 3. ### Inspect the operation records -Each `Operation` object is a record of a single invocation — what fired, what -the model returned, and what the controller applied. +Each `Operation` object is a record of a single invocation. ```bash kubectl get watchoperations -``` - -```bash kubectl get operations ``` @@ -458,23 +437,23 @@ Pick one of the operation names and describe it: kubectl describe operation ``` -The `Events` section shows the exact YAML the model returned. +The `Events` section shows the exact YAML the model returned and what the +controller applied. -## Part 2: Watch it self-heal +## Watch it heal -The controller re-evaluates on every change. If something modifies the -`Deployment` — a human, a CI pipeline, a rollout — the rule re-applies. -This is drift detection with reasoning. +The `WatchOperation` re-evaluates on every change. If anything modifies the +`Deployment` — a human, a CI pipeline, a rollout — the rule re-applies. This is +drift detection with reasoning: not just "was this field changed" but "does this +still satisfy the intent?" -### Trigger a violation - -Scale nginx down to 1 replica: +### Scale down nginx ```bash kubectl scale deployment nginx --replicas=1 ``` -### Watch it recover +### Watch the controller heal it ```bash kubectl get deployment nginx -w @@ -486,29 +465,36 @@ the rule, and patched it. Press Ctrl+C when replicas are back at 3. -### Inspect what fired +### See what fired ```bash +kubectl get watchoperations kubectl get operations ``` -Each entry is a new record. 
The most recent one captured the scale-down event -and the correction. - -## Part 3: Update the rules +Each entry is a record of what fired, what the model decided, and what changed. +The most recent one captured the scale-down event and the correction. -The enforcement logic is a text field. To change the policy, edit `systemPrompt` -and re-apply. - -### Open the operation +### See where the model runs ```bash -cat operations/replicas/operation.yaml +kubectl get secret gpt -n crossplane-system -o yaml ``` -### Change the minimum replicas to 5 +`OPENAI_BASE_URL` points to Ollama's OpenAI-compatible API running locally on +your machine — no data leaves the machine. Change that URL to +`https://api.openai.com/v1` and update `OPENAI_MODEL`, and the +`WatchOperation` works identically. + +## Change the rules -Find the `systemPrompt` and update the rule line. Change: +The enforcement logic is a text field. To change the policy, edit `systemPrompt` +and re-apply. No code change. No build pipeline. No rollout. + +### Update the minimum replicas to 5 + +Open `operations/replicas/operation.yaml`. Find the `systemPrompt` and change +the rule line from: ```text Rule: if spec.replicas is less than 3, set it to 3. Otherwise keep it unchanged. @@ -536,6 +522,13 @@ sed -i 's/less than 3, set it to 3/less than 5, set it to 5/' \ operations/replicas/operation.yaml ``` +:::info +With `qwen2.5:1.5b`, keep the full `userPrompt` output instructions in place. +The explicit YAML template keeps the small model's output reliable. With a +larger model like `gpt-4o`, you can remove the `userPrompt` entirely and keep +only the rule in `systemPrompt`. 
+::: + ### Apply the updated operation ```bash @@ -544,7 +537,7 @@ kubectl apply -f operations/replicas/operation.yaml ### Trigger and observe -Scale nginx down to 1 to trigger the new rule: +Scale nginx down to 1: ```bash kubectl scale deployment nginx --replicas=1 @@ -568,7 +561,7 @@ kubectl get operations Same architecture, different policy — changed by editing a text field. :::tip -Try adding a condition to the rule: +Try adding a conditional rule to the `systemPrompt`: ``` If the deployment name contains 'prod', require at least 5 replicas. @@ -576,7 +569,8 @@ Otherwise, require at least 2. ``` The model interprets natural language conditions the same way it interprets -simple numeric rules. +simple numeric rules. Any platform engineer can read the rule, change it, and +version it in Git — without writing Go. ::: ## Clean up @@ -592,27 +586,25 @@ kubectl delete deployment nginx Delete the cluster: ```bash -kind delete cluster --name up-app-w-db +kind delete cluster --name up-english-controller ``` ## Next steps In this tutorial, you: -- Created a Crossplane project with `upbound.yaml` and a `WatchOperation` +- Created a Crossplane project with a `WatchOperation` and a KCL function - Deployed a controller that calls a local LLM on every `Deployment` change - Watched the controller detect and correct a replica count violation - Updated the enforcement policy by editing a single field in YAML Continue with: -- [WatchOperations reference][watchops-ref] — triggers, concurrency, history - limits, and output handling +- [WatchOperations reference][watchops-ref] — triggers, concurrency, history limits, and output handling +- [CronOperations reference][cronops-ref] — schedule-driven operations - [Composition functions][fn-docs] — build custom logic for any resource -- [Provider authentication][auth-docs] — connect providers to your own cloud - account -- [Upbound Marketplace][marketplace] — functions and providers for AWS, Azure, - GCP, and more +- [Provider 
authentication][auth-docs] — connect providers to your own cloud account +- [Upbound Marketplace][marketplace] — functions and providers for AWS, Azure, GCP, and more [docker-install]: https://docs.docker.com/get-docker/ [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ @@ -621,6 +613,7 @@ Continue with: [up-cli-releases]: https://github.com/upbound/up/releases [uxp-releases]: /reference/release-notes/ [watchops-ref]: /manuals/crossplane/operations/watch/ +[cronops-ref]: /manuals/crossplane/operations/cron/ [fn-docs]: /manuals/cli/howtos/compositions/ [auth-docs]: /manuals/packages/providers/authentication/ [marketplace]: https://marketplace.upbound.io/ diff --git a/docs/getstarted/ai-database-scaling-tutorial.md b/docs/getstarted/ai-database-scaling-tutorial.md new file mode 100644 index 00000000..5e0a2306 --- /dev/null +++ b/docs/getstarted/ai-database-scaling-tutorial.md @@ -0,0 +1,535 @@ +--- +title: AI-driven database scaling with Crossplane +description: Deploy an AI controller that reads live RDS metrics and scales a database automatically — no Go, no custom operator, just a YAML CronOperation and a plain-English system prompt. +weight: {weight} +validation: + type: walkthrough + owner: docs@upbound.io + environment: local-upbound + timeout: 60m + variables: + AWS_ACCESS_KEY_ID: "" + AWS_SECRET_ACCESS_KEY: "" + ANTHROPIC_API_KEY: "" +--- + +In this tutorial, you deploy an AI controller that manages an AWS RDS database. +A `CronOperation` runs every minute. It reads live CloudWatch metrics from the +database object, calls Claude, and decides whether to scale. If it scales, it +writes its reasoning back to the object as an annotation. + +No Go. No custom operator. No build pipeline. The controller is a single YAML file. 
+ +By the end of this tutorial, you can: + +- See live CloudWatch metrics surfaced directly on a Crossplane `SQLInstance` object +- Deploy an AI scaling controller with a single `kubectl apply` +- Read the model's reasoning from the Kubernetes object it acted on +- Trigger a load test and watch the AI decide to scale up in real time + +## Prerequisites + +Install the following tools before starting: + +- [`kubectl`][kubectl-install] +- [AWS CLI][aws-cli], configured with credentials that can create VPCs and RDS instances +- [kind][kind] +- An [Anthropic API key][anthropic-console] with access to Claude + +### Install the up CLI + +```shell +curl -sL "https://cli.upbound.io" | sh +sudo mv up /usr/local/bin/ +``` + +If you don't have `sudo` access: + +```shell +mkdir -p ~/.local/bin && mv up ~/.local/bin/ +export PATH="$HOME/.local/bin:$PATH" +``` + +Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, or +equivalent) to make it permanent. + +### Install mysqlslap + +The load test in this tutorial uses `mysqlslap`, which ships with the MySQL +client tools. + +**macOS:** + +```shell +brew install mysql-client +export PATH="$(brew --prefix mysql-client)/bin:$PATH" +``` + +**Linux (Debian/Ubuntu):** + +```shell +apt-get install -y mysql-client +``` + +## Clone the project + +```bash +git clone https://github.com/upbound/configuration-aws-database-ai demo +cd demo +``` + +All commands from this point run from inside the `demo` directory. + +## Configure credentials + +Export your AWS credentials and Anthropic API key. The setup steps below use +these values to create Kubernetes secrets. 
+ +```bash +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export ANTHROPIC_API_KEY= +``` + +## Start the project + +Open a dedicated terminal and run from inside the `demo` directory: + +```bash +up project run --local --ingress +``` + +This command: + +- Creates a kind cluster +- Installs UXP +- Builds and deploys the composition functions (`function-rds-metrics` and `function-claude`) +- Installs the AWS providers declared in `upbound.yaml` +- Applies the XRDs from `apis/` +- Installs an ingress controller for the UXP console + +Startup takes several minutes. The command exits when the cluster is ready. + +:::warning +If `up project run --local` prints `traces export: context deadline exceeded` +in stderr, check whether providers were installed: + +```bash +kubectl get providers +``` + +If providers appear, provisioning succeeded despite the telemetry error. +If the list is empty, delete the cluster and retry: + +```bash +kind delete cluster --name up-$(basename "$PWD") +``` + +Verify your network allows outbound connections to `xpkg.upbound.io` on port 443. +::: + +### Configure kubectl + +In your second terminal, point kubectl at the new cluster. `up project run --local` +names the cluster after the project directory: + +```bash +CLUSTER_NAME=$(kind get clusters | grep "^up-" | head -1) +kind get kubeconfig --name "${CLUSTER_NAME}" > ~/.kube/config +``` + +:::warning +This overwrites your existing `~/.kube/config`. To preserve existing contexts, +merge instead: + +```bash +kind get kubeconfig --name "${CLUSTER_NAME}" > ~/.kube/config-upbound +KUBECONFIG=~/.kube/config:~/.kube/config-upbound \ + kubectl config view --flatten > ~/.kube/config.merged +mv ~/.kube/config.merged ~/.kube/config +``` +::: + +Verify the connection: + +```bash +kubectl get nodes +``` + +### Create the namespace and apply credentials + +1. Create the `database-team` namespace: + + ```bash + kubectl apply -f examples/ns-database-team.yaml + ``` + +2. 
Create the AWS credentials secret. The `ProviderConfig` and the + `function-rds-metrics` function both read from this secret: + + ```bash + kubectl create secret generic aws-creds \ + --namespace database-team \ + --from-literal=credentials="$(printf '[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n' \ + "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY")" \ + --dry-run=client -o yaml | kubectl apply -f - + + kubectl create secret generic aws-creds \ + --namespace crossplane-system \ + --from-literal=credentials="$(printf '[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n' \ + "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY")" \ + --dry-run=client -o yaml | kubectl apply -f - + ``` + +3. Create the Anthropic API key secret used by `function-claude`: + + ```bash + kubectl create secret generic claude \ + --namespace crossplane-system \ + --from-literal=ANTHROPIC_API_KEY="${ANTHROPIC_API_KEY}" \ + --dry-run=client -o yaml | kubectl apply -f - + ``` + +### Verify providers and functions + +Wait for both AWS providers and both functions to become healthy: + +```bash +kubectl get providers +kubectl get functions +``` + +All four should show `HEALTHY: True` before continuing. + +:::warning +If `kubectl get providers` or `kubectl get functions` returns **No resources found**, +`up project run --local` did not complete successfully. Delete the cluster and +restart from [Start the project](#start-the-project). +::: + +### Apply the ProviderConfig + +```bash +kubectl apply -f examples/providerconfig-aws-static.yaml +``` + +### Provision the network + +```bash +kubectl apply -f examples/network-rds-metrics.yaml +``` + +Wait for the network composite resource to become ready (~5 minutes): + +```bash +kubectl get network rds-metrics-database-ai-scale -n database-team -w +``` + +Wait until `READY: True`. Press Ctrl+C when it does. 
+
+### Provision the database
+
+```bash
+kubectl apply -f examples/mariadb-xr-rds-metrics.yaml
+```
+
+RDS provisioning takes 10–15 minutes. Watch the status:
+
+```bash
+kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team -w
+```
+
+Wait until `READY: True` before continuing. Press Ctrl+C when it does.
+
+:::info
+While waiting, the `function-rds-metrics` composition step is already
+collecting CloudWatch data and writing it onto the object. By the time the
+database is ready, `status.performanceMetrics` will have live data.
+:::
+
+### Access the UXP console
+
+1. Enable the web UI:
+
+   ```bash
+   up uxp web-ui enable
+   ```
+
+2. In a new terminal, port-forward to the service:
+
+   ```bash
+   kubectl port-forward -n crossplane-system svc/webui 8080:80
+   ```
+
+3. Open `http://localhost:8080` in your browser.
+
+## Meet the database
+
+An RDS MariaDB instance is running on AWS, managed by Crossplane. Before
+wiring the AI into the loop, explore what the system already knows.
+
+### See the database object
+
+```bash
+kubectl get sqlinstance -n database-team
+```
+
+You should see `rds-metrics-database-ai-mysql` with `READY: True`. That's a
+real AWS RDS instance, managed as a Kubernetes object.
+
+In the UXP console, click **View all Composite Resources**. You'll see
+`rds-metrics-database-ai-mysql` listed. Click **Relationship View** to see
+the resources Crossplane provisioned.
+
+### Verify the AWS resource
+
+Open the [AWS Console, RDS in us-west-2][aws-rds] and look for
+`rds-metrics-database-ai-mysql`.
+
+The Kubernetes object and the AWS resource are the same thing. Crossplane is
+the bridge.
+
+### See what the AI will read
+
+```bash
+kubectl describe sqlinstance rds-metrics-database-ai-mysql -n database-team
+```
+
+Find the `status.performanceMetrics` block. That's live CloudWatch data — CPU
+utilization, active connections, free storage — collected by
+`function-rds-metrics` and written directly onto the object.
+ +This is the only context the AI sees. It never touches CloudWatch directly. The +control plane is the authoritative source of state for both humans and the AI. + +Or fetch just the metrics: + +```bash +kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.status.performanceMetrics}' | jq . +``` + +### Open the controller + +Open `operations/rds-intelligent-scaling-cron/operation.yaml` in your editor. + +That file is the entire scaling controller. The `systemPrompt` defines the +scaling logic — thresholds, instance class progression, cooldown. No Go. No +custom operator. No build pipeline. + +### Apply the controller + +```bash +kubectl apply -f operations/rds-intelligent-scaling-cron/operation.yaml +``` + +### Watch the first decision + +```bash +kubectl get cronoperation +``` + +It takes 30–45 seconds to start. Once running, watch for the first operation: + +```bash +kubectl get operations -w +``` + +Wait until an operation shows `SUCCEEDED: True`, then press Ctrl+C and describe it: + +```bash +kubectl describe operation +``` + +Look at the `Events` section. That's the AI's output — its reasoning about +whether to scale, and what it decided. + +Then check the annotation written back to the database object: + +```bash +kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.metadata.annotations}' | jq . +``` + +The AI's reasoning is on the object. Not a black box — the control plane is the +system of record for every decision the AI made. + +In the UXP console, navigate to `rds-metrics-database-ai-mysql` and open the +**YAML** tab. You'll see the `intelligent-scaling/last-scaled-decision` +annotation with the model's last decision. + +## Read the room + +The `CronOperation` runs every minute. CPU is low right now. Watch what the AI +decides when there's nothing to do — and understand exactly what it sees. 
+ +### Watch operations fire + +```bash +kubectl get operations -w +``` + +A new operation appears roughly every minute. Press Ctrl+C after a few have run. + +In the UXP console, select **Operations** in the left navigation to see the +same list visually. + +### Read a decision + +Pick one of the operation names and describe it: + +```bash +kubectl describe operation +``` + +Look at the `Events` section. At low CPU, the AI should decide to hold. The +cooldown logic is also in the prompt — it won't flip the instance class every +minute even if thresholds are crossed. + +### See the current metrics + +```bash +kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.status.performanceMetrics}' | jq . +``` + +This is exactly what the AI sees before making a decision. Live data, on the object. + +### See the current instance class + +```bash +kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.spec.parameters.instanceClass}' +``` + +It's `db.t3.micro`. That's about to change. + +You can also confirm the current instance type in the [AWS Console, RDS in +us-west-2][aws-rds]. + +## Trigger a scale + +Time to put the controller under pressure. A load test drives CPU above the +scaling threshold and the AI decides to act. + +### Confirm the starting instance class + +```bash +kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.spec.parameters.instanceClass}' +``` + +It should be `db.t3.micro`. + +### Run the load test + +In a second terminal, run the load test from inside the `demo` directory: + +```bash +bash perf-scale-demo.sh +``` + +This hammers the database with CPU-intensive queries. The script takes 5–10 +minutes. If it finishes without triggering a scale, run it again. 
+ +### Watch CPU climb + +In your first terminal, watch the metrics update every 10 seconds: + +```bash +watch -n 10 "kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.status.performanceMetrics.metrics}' | jq ." +``` + +### Watch the controller fire + +Press Ctrl+C to exit the watch command, then: + +```bash +kubectl get operations -w +``` + +When CPU crosses the threshold (~60%), the next `CronOperation` will decide to +scale up. Press Ctrl+C once you see a new operation start. + +### See the scale event + +Check the instance class: + +```bash +kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.spec.parameters.instanceClass}' +``` + +It should now be `db.t3.small`. Check the reasoning: + +```bash +kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.metadata.annotations.intelligent-scaling/last-scaled-decision}' +``` + +In the [AWS Console, RDS in us-west-2][aws-rds], refresh the database list. +The instance class change is in progress — RDS is modifying the live database. +No Terraform. No manual AWS operation. The platform handled it. + +The AI read the metrics, crossed the threshold, picked the next instance class, +and wrote its reasoning to the object. The control plane made the call. + +## Clean up + +Delete the composite resources. Crossplane deletes all composed AWS resources +(VPC, subnets, RDS instance) before removing the composite resources. + +```bash +kubectl delete sqlinstance rds-metrics-database-ai-mysql -n database-team +kubectl delete network rds-metrics-database-ai-scale -n database-team +``` + +RDS deletion takes 5–10 minutes. 
Wait until the `sqlinstance` is fully removed: + +```bash +kubectl get sqlinstance -n database-team -w +``` + +Once it's gone, delete the `CronOperation` and its history: + +```bash +kubectl delete cronoperation rds-intelligent-scaling-cron +kubectl delete operations --all +``` + +Stop `up project run` with Ctrl+C in that terminal, then delete the cluster: + +```bash +CLUSTER_NAME=$(kind get clusters | grep "^up-" | head -1) +kind delete cluster --name "${CLUSTER_NAME}" +``` + +## Next steps + +In this tutorial, you: + +- Provisioned a real AWS RDS instance managed as a Crossplane `SQLInstance` +- Observed live CloudWatch metrics surfaced directly on the Kubernetes object +- Deployed an AI scaling controller with a single `kubectl apply` +- Read the model's reasoning from the annotation it wrote back to the object +- Ran a load test and watched the AI scale the database automatically + +Continue with: + +- [CronOperations reference][cronops-ref] — schedules, history limits, concurrency +- [WatchOperations reference][watchops-ref] — event-driven operations +- [Composition functions][fn-docs] — build custom logic for any resource +- [Provider authentication][auth-docs] — connect providers to your own cloud account +- [Upbound Marketplace][marketplace] — providers and functions for AWS, Azure, GCP, and more + +[kubectl-install]: https://kubernetes.io/docs/tasks/tools/ +[aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html +[kind]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation +[anthropic-console]: https://console.anthropic.com/ +[aws-rds]: https://us-west-2.console.aws.amazon.com/rds/home?region=us-west-2#databases: +[cronops-ref]: /manuals/crossplane/operations/cron/ +[watchops-ref]: /manuals/crossplane/operations/watch/ +[fn-docs]: /manuals/cli/howtos/compositions/ +[auth-docs]: /manuals/packages/providers/authentication/ +[marketplace]: https://marketplace.upbound.io/ diff --git 
a/docs/getstarted/crossplane-tutorial.md b/docs/getstarted/platform-tutorial.md similarity index 67% rename from docs/getstarted/crossplane-tutorial.md rename to docs/getstarted/platform-tutorial.md index 99b70af9..144db5ad 100644 --- a/docs/getstarted/crossplane-tutorial.md +++ b/docs/getstarted/platform-tutorial.md @@ -13,12 +13,13 @@ validation: --- In this tutorial, you deploy an application with a PostgreSQL database on AWS, -watch Crossplane self-heal a manually changed resource, enforce security policy, -and change live infrastructure — all by updating YAML files. +watch Crossplane self-heal a manually changed resource, enforce a security +policy, and change live infrastructure — all by updating YAML files. By the end of this tutorial, you can: - Deploy a composite resource that creates multiple AWS resources from a single manifest +- Explore the providers and ProviderConfigs that connect your platform to AWS - Trigger drift detection and watch Crossplane correct an out-of-band change - Block non-compliant requests with Kyverno before they reach Crossplane - Update live infrastructure by changing desired state @@ -28,39 +29,32 @@ By the end of this tutorial, you can: Install the following tools before starting: - [`kubectl`][kubectl-install] -- [AWS CLI][aws-cli], configured with credentials for an account where you can create resources +- [AWS CLI][aws-cli], configured with credentials for an account where you can create VPCs, IAM roles, and RDS instances - [kind][kind] ### Install the up CLI -Install the `up` CLI via shell script: - ```shell curl -sL "https://cli.upbound.io" | sh ``` -If the script fails, download a specific version directly from [GitHub releases][up-cli-releases]. 
- Move the binary into your `PATH`: ```shell sudo mv up /usr/local/bin/ ``` -If you don't have `sudo` access, install to a user-local directory instead: +If you don't have `sudo` access: ```shell mkdir -p ~/.local/bin && mv up ~/.local/bin/ -``` - -Then add it to your `PATH` permanently by adding this line to your shell -profile (`~/.bashrc`, `~/.zshrc`, or equivalent): - -```shell export PATH="$HOME/.local/bin:$PATH" ``` -## Create the project +Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, or +equivalent) to make it permanent. + +## Set up the project ### Create the project directory @@ -69,13 +63,13 @@ mkdir platform-demo cd platform-demo ``` -All commands from this point run from inside the `platform-demo` directory. +All commands from this point run from inside `platform-demo`. ### Create the project manifest -The `upbound.yaml` file declares the project and its provider and function -dependencies. `up project run --local` reads this file to determine what -packages to install into the cluster. +The `upbound.yaml` file declares the project name and its provider and function +dependencies. `up project run --local` reads this file to install packages into +the cluster. ```bash cat > upbound.yaml <<'EOF' @@ -84,6 +78,7 @@ kind: Project metadata: name: app-w-db spec: + repository: xpkg.upbound.io/upbound/app-w-db apiDependencies: - k8s: version: v1.33.0 @@ -91,27 +86,22 @@ spec: dependsOn: - apiVersion: pkg.crossplane.io/v1 kind: Provider - # provider-family-aws installs shared config and authentication infrastructure. package: xpkg.upbound.io/upbound/provider-family-aws version: v2.4.0 - apiVersion: pkg.crossplane.io/v1 kind: Provider - # provider-aws-iam manages IAM roles and policies. package: xpkg.upbound.io/upbound/provider-aws-iam version: v2.4.0 - apiVersion: pkg.crossplane.io/v1 kind: Provider - # provider-aws-rds manages RDS instances and subnet groups. 
package: xpkg.upbound.io/upbound/provider-aws-rds version: v2.4.0 - apiVersion: pkg.crossplane.io/v1 kind: Provider - # provider-aws-ec2 manages VPCs and subnets. package: xpkg.upbound.io/upbound/provider-aws-ec2 version: v2.4.0 - apiVersion: pkg.crossplane.io/v1beta1 kind: Function - # function-auto-ready marks composed resources as ready automatically. package: xpkg.upbound.io/crossplane-contrib/function-auto-ready version: v0.6.1 description: A Crossplane composition that provisions a web application with a @@ -127,7 +117,7 @@ The platform exposes two APIs: `AppWDB` (a basic app with a database) and `AppWDBSecure` (the same API with an optional security context, used later for policy enforcement). -Create the API directory and XRD for `AppWDB`: +Create the `AppWDB` XRD: ```bash mkdir -p apis/appwdb @@ -187,7 +177,7 @@ spec: EOF ``` -Create the XRD for `AppWDBSecure`: +Create the `AppWDBSecure` XRD: ```bash mkdir -p apis/appwdbsecure @@ -254,62 +244,10 @@ spec: EOF ``` -### Create the Compositions - -Both APIs share the same composition function, `app-w-dbcompose-resources`, -which is the KCL function you create in the next step. 
- -```bash -cat > apis/appwdb/composition.yaml <<'EOF' -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - labels: - provider: aws - type: app-w-db - name: appwdbs.demo.upbound.io -spec: - compositeTypeRef: - apiVersion: demo.upbound.io/v1alpha1 - kind: AppWDB - mode: Pipeline - pipeline: - - step: compose-resources - functionRef: - name: app-w-dbcompose-resources - - step: automatically-detect-ready-composed-resources - functionRef: - name: crossplane-contrib-function-auto-ready -EOF - -cat > apis/appwdbsecure/composition.yaml <<'EOF' -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - labels: - provider: aws - type: app-w-db-secure - name: appwdbsecures.demo.upbound.io -spec: - compositeTypeRef: - apiVersion: demo.upbound.io/v1alpha1 - kind: AppWDBSecure - mode: Pipeline - pipeline: - - step: compose-resources - functionRef: - name: app-w-dbcompose-resources - - step: automatically-detect-ready-composed-resources - functionRef: - name: crossplane-contrib-function-auto-ready -EOF -``` - ### Create the composition function The composition function is a KCL program that maps the user's 10-line request -to the full set of AWS resources. Create the function directory and package -manifest: +to the full set of AWS resources. ```bash mkdir -p functions/compose-resources @@ -320,9 +258,8 @@ version = "0.1.0" EOF ``` -Create the composition logic in `main.k`. This is the entire implementation — -it reads from the composite resource and outputs every managed resource -Crossplane creates: +Create `main.k`. 
This file is the entire composition logic — it reads the +composite resource and outputs every managed resource Crossplane creates: ```bash cat > functions/compose-resources/main.k <<'EOF' @@ -387,7 +324,7 @@ _db_items = [{ username: "demoadmin" dbName: "appdb" autoGeneratePassword: True - passwordSecretRef: {name: "${appName}-db-password", key: "password"} + passwordSecretRef: {namespace: oxr.metadata.namespace, name: "${appName}-db-password", key: "password"} applyImmediately: True skipFinalSnapshot: True allocatedStorage: 20 @@ -469,10 +406,90 @@ items = _items EOF ``` +### Create example manifests + +Create the base example and change variants for later steps: + +```bash +mkdir -p examples/appwdb +cat > examples/appwdb/example.yaml <<'EOF' +apiVersion: demo.upbound.io/v1alpha1 +kind: AppWDB +metadata: + name: demo-01 + namespace: demo +spec: + parameters: + replicas: 2 + dbSize: db.t3.micro + region: eu-central-1 +EOF + +cat > examples/appwdb/variant-bigger-db.yaml <<'EOF' +apiVersion: demo.upbound.io/v1alpha1 +kind: AppWDB +metadata: + name: demo-01 + namespace: demo +spec: + parameters: + replicas: 2 + dbSize: db.t3.medium + region: eu-central-1 +EOF + +cat > examples/appwdb/variant-more-replicas.yaml <<'EOF' +apiVersion: demo.upbound.io/v1alpha1 +kind: AppWDB +metadata: + name: demo-01 + namespace: demo +spec: + parameters: + replicas: 5 + dbSize: db.t3.micro + region: eu-central-1 +EOF +``` + +Create the secure examples used in the policy enforcement step: + +```bash +mkdir -p examples/appwdbsecure +cat > examples/appwdbsecure/example-1.yaml <<'EOF' +apiVersion: demo.upbound.io/v1alpha1 +kind: AppWDBSecure +metadata: + name: kyverno-demo-01 + namespace: demo +spec: + parameters: + replicas: 2 + dbSize: db.t3.micro + region: eu-central-1 + securityContext: + privileged: true +EOF + +cat > examples/appwdbsecure/example-2.yaml <<'EOF' +apiVersion: demo.upbound.io/v1alpha1 +kind: AppWDBSecure +metadata: + name: kyverno-demo-01 + namespace: demo +spec: + 
parameters: + replicas: 2 + dbSize: db.t3.micro + region: eu-central-1 + securityContext: + privileged: false +EOF +``` + ### Create the ProviderConfig -The `ProviderConfig` tells the AWS providers where to find credentials. Create -it now — you apply it after providers are healthy. +The `ProviderConfig` tells the AWS providers where to find credentials. ```bash mkdir -p setup/config @@ -494,10 +511,8 @@ EOF ## Configure AWS credentials -The demo creates real AWS resources. You need credentials with permissions to -create VPCs, subnets, IAM roles, and RDS instances. - -Export your credentials: +The demo creates real AWS resources. Export credentials with permissions to +create VPCs, subnets, IAM roles, and RDS instances: ```bash export AWS_ACCESS_KEY_ID= @@ -506,50 +521,46 @@ export AWS_SECRET_ACCESS_KEY= ## Start the project -Open a dedicated terminal window and run from inside the `platform-demo` directory: +Open a dedicated terminal window and run from inside `platform-demo`: ```bash -up project run --local +up project run --local --ingress ``` -Leave this terminal running for the duration of the tutorial. This command: +This command: -- Creates a kind cluster named `up-app-w-db` (the default name for `up project run --local`) +- Creates a kind cluster named `up-app-w-db` - Installs UXP into the cluster - Builds and deploys the KCL composition function - Installs the AWS providers declared in `upbound.yaml` -- Applies the XRDs and Compositions from `apis/` +- Applies the XRDs from `apis/` +- Installs an ingress controller for the UXP console -Startup takes several minutes. Once the command prints output confirming the -cluster is created and providers are installing, open a second terminal, -`cd` into the `platform-demo` directory, and continue with the steps below. +Startup takes several minutes. Keep this terminal open throughout the tutorial. :::warning `up project run --local` may print `traces export: context deadline exceeded` -in stderr. 
This is a non-fatal telemetry export error — it does not mean -provisioning failed. Check whether providers were actually installed by running -`kubectl get providers` in the second terminal. If providers appear, continue. +in stderr. This is a non-fatal telemetry export error. Check whether providers +were installed with `kubectl get providers`. If providers appear, continue. If `up project run --local` exits non-zero AND `kubectl get providers` returns -**No resources found**, provisioning did fail. Run -`kind delete cluster --name up-app-w-db` and restart from this step. Verify -your network allows outbound connections to `xpkg.upbound.io` on port 443. +**No resources found**, provisioning failed. Run +`kind delete cluster --name up-app-w-db` and restart. Verify your network +allows outbound connections to `xpkg.upbound.io` on port 443. ::: ### Configure kubectl -Once `up project run --local` has created the cluster, point kubectl at it. -Run this in your second terminal from inside the `platform-demo` directory: +In your second terminal, point kubectl at the new cluster: ```bash kind get kubeconfig --name up-app-w-db > ~/.kube/config ``` :::warning -This overwrites your existing `~/.kube/config`. To preserve your existing -contexts, use `kind get kubeconfig --name up-app-w-db > ~/.kube/config-upbound` -and then merge: `KUBECONFIG=~/.kube/config:~/.kube/config-upbound kubectl -config view --flatten > ~/.kube/config.merged && mv ~/.kube/config.merged ~/.kube/config` +This overwrites your existing `~/.kube/config`. To preserve existing contexts, +use `kind get kubeconfig --name up-app-w-db > ~/.kube/config-upbound` and merge: +`KUBECONFIG=~/.kube/config:~/.kube/config-upbound kubectl config view --flatten > ~/.kube/config.merged && mv ~/.kube/config.merged ~/.kube/config` ::: Verify the connection: @@ -560,13 +571,13 @@ kubectl get nodes ### Apply AWS credentials -1. Create the demo namespace: +1. 
Create the `demo` namespace: ```bash kubectl create namespace demo ``` -2. Create a Kubernetes secret with your AWS credentials: +2. Create a secret with your AWS credentials: ```bash kubectl create secret generic aws-secret \ @@ -583,13 +594,12 @@ Check that providers are installed and healthy: kubectl get providers ``` -All providers should show `HEALTHY: True`. Keep running this command until all -show `HEALTHY: True` before continuing. +All four providers should show `HEALTHY: True`. Keep running this until they do +before continuing. :::warning -If this command returns **No resources found**, `up project run --local` did -not complete successfully. Check that terminal for errors. An empty list means -provisioning failed, not that it's still in progress. Delete the cluster with +If this returns **No resources found**, `up project run --local` did not +complete successfully. Delete the cluster with `kind delete cluster --name up-app-w-db` and restart. ::: @@ -599,13 +609,73 @@ Check that the composition function is healthy: kubectl get functions ``` -The function should show `HEALTHY: True`. +The KCL function should show `HEALTHY: True`. :::warning If this returns **No resources found**, the KCL function was not built or deployed. Check the `up project run` terminal and restart. 
::: +### Apply the Compositions + +Get the exact function name assigned by `up project run`: + +```bash +FUNC_NAME=$(kubectl get functions --no-headers | grep -v 'crossplane-contrib' | awk '{print $1}') +echo $FUNC_NAME +``` + +Apply both Compositions using that name: + +```bash +cat > apis/appwdb/composition.yaml < apis/appwdbsecure/composition.yaml < Date: Tue, 5 May 2026 12:16:33 -0400 Subject: [PATCH 07/12] updates --- .../ai-database-scaling-tutorial.md | 35 +++++++++++-------- .../styles/Upbound/spelling-exceptions.txt | 3 ++ 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/docs/getstarted/ai-database-scaling-tutorial.md b/docs/getstarted/ai-database-scaling-tutorial.md index 5e0a2306..9d4ea463 100644 --- a/docs/getstarted/ai-database-scaling-tutorial.md +++ b/docs/getstarted/ai-database-scaling-tutorial.md @@ -50,10 +50,11 @@ mkdir -p ~/.local/bin && mv up ~/.local/bin/ export PATH="$HOME/.local/bin:$PATH" ``` -Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, or -equivalent) to make it permanent. +Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, etc) to make it permanent. + ### Install mysqlslap + The load test in this tutorial uses `mysqlslap`, which ships with the MySQL client tools. @@ -109,10 +110,10 @@ This command: - Installs an ingress controller for the UXP console Startup takes several minutes. The command exits when the cluster is ready. - + :::warning If `up project run --local` prints `traces export: context deadline exceeded` -in stderr, check whether providers were installed: +in stderr, check the provider status: ```bash kubectl get providers @@ -128,6 +129,7 @@ kind delete cluster --name up-$(basename "$PWD") Verify your network allows outbound connections to `xpkg.upbound.io` on port 443. ::: + ### Configure kubectl In your second terminal, point kubectl at the new cluster. `up project run --local` @@ -203,7 +205,7 @@ All four should show `HEALTHY: True` before continuing. 
:::warning If `kubectl get providers` or `kubectl get functions` returns **No resources found**, -`up project run --local` did not complete successfully. Delete the cluster and +`up project run --local` didn't complete successfully. Delete the cluster and restart from [Start the project](#start-the-project). ::: @@ -233,7 +235,7 @@ Wait until `READY: True`. Press Ctrl+C when it does. kubectl apply -f examples/mariadb-xr-rds-metrics.yaml ``` -RDS provisioning takes 10–15 minutes. Watch the status: +RDS provisioning takes 10 to 15 minutes. Watch the status: ```bash kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team -w @@ -241,11 +243,15 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team -w Wait until `READY: True` before continuing. Press Ctrl+C when it does. + + :::info While waiting, the `function-rds-metrics` composition step is already collecting CloudWatch data and writing it onto the object. By the time the database is ready, `status.performanceMetrics` will have live data. ::: + + ### Access the UXP console @@ -283,23 +289,23 @@ the resources Crossplane provisioned. ### Verify the AWS resource -In the [AWS Console, RDS in us-west-2][aws-rds] — look for +In the [AWS Console, RDS in `us-west-2`][aws-rds] find `rds-metrics-database-ai-mysql`. The Kubernetes object and the AWS resource are the same thing. Crossplane is the bridge. -### See what the AI will read +### Find the performance metrics ```bash kubectl describe sqlinstance rds-metrics-database-ai-mysql -n database-team ``` -Find the `status.performanceMetrics` block. That's live CloudWatch data — CPU -utilization, active connections, free storage — collected by -`function-rds-metrics` and written directly onto the object. +Find the `status.performanceMetrics` block. This block contains live CloudWatch data like CPU +utilization, active connections, free storage. `function-rds-metrics` collects +this data and writes it into the object. 
-This is the only context the AI sees. It never touches CloudWatch directly. The +The AI can only access this context. It never touches CloudWatch directly. The control plane is the authoritative source of state for both humans and the AI. Or fetch just the metrics: @@ -313,9 +319,10 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ Open `operations/rds-intelligent-scaling-cron/operation.yaml` in your editor. + That file is the entire scaling controller. The `systemPrompt` defines the -scaling logic — thresholds, instance class progression, cooldown. No Go. No -custom operator. No build pipeline. +scaling logic like thresholds, instance class progression, cooldown. + ### Apply the controller diff --git a/utils/vale/styles/Upbound/spelling-exceptions.txt b/utils/vale/styles/Upbound/spelling-exceptions.txt index f82746d1..c3932a47 100644 --- a/utils/vale/styles/Upbound/spelling-exceptions.txt +++ b/utils/vale/styles/Upbound/spelling-exceptions.txt @@ -142,6 +142,9 @@ UXP uxp vCluster vcluster +VPC +VPCs +VPC's virtualized Velero VMs From 9107601ab8e3b8f3cf388481ae34ceb46f7bb321 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 6 May 2026 13:29:08 -0400 Subject: [PATCH 08/12] region updates and vale errors --- docs/getstarted/ai-controller-tutorial.md | 148 ++++++------- .../ai-database-scaling-tutorial.md | 126 ++++------- docs/getstarted/platform-tutorial.md | 203 +++++++----------- .../styles/Upbound/spelling-exceptions.txt | 4 + 4 files changed, 192 insertions(+), 289 deletions(-) diff --git a/docs/getstarted/ai-controller-tutorial.md b/docs/getstarted/ai-controller-tutorial.md index fe7bd103..363d14ab 100644 --- a/docs/getstarted/ai-controller-tutorial.md +++ b/docs/getstarted/ai-controller-tutorial.md @@ -1,6 +1,6 @@ --- title: Build an AI controller with Crossplane -description: Deploy a WatchOperation that uses a local LLM to enforce platform policy — no Go, no operator framework, just YAML and a plain-English rule. 
+description: Deploy a WatchOperation that uses a local LLM to enforce platform policy. weight: {weight} validation: type: walkthrough @@ -11,9 +11,9 @@ validation: HOST_IP: "" --- -In this tutorial, you run a Kubernetes controller whose reconciliation logic is -written in plain English. A Crossplane `WatchOperation` watches an nginx -`Deployment` and calls a local LLM whenever it changes. The LLM reads the +In this tutorial, you run a Kubernetes controller with reconciliation logic in +plain English. A Crossplane `WatchOperation` watches an nginx `Deployment` and +calls a local LLM whenever it changes. The LLM reads the current state, applies the rule in its `systemPrompt`, and returns a corrected manifest. Crossplane applies it. @@ -23,8 +23,8 @@ By the end of this tutorial, you can: - Watch the controller detect and correct a policy violation automatically - Update the enforcement rule by editing a single field in YAML -The model running in this tutorial is `qwen2.5:1.5b` via Ollama — running -entirely on your local machine. No cloud API key is required. +The model in this tutorial is `qwen3.5:latest`, running locally via Ollama. +No cloud API key required. ## Prerequisites @@ -55,8 +55,7 @@ mkdir -p ~/.local/bin && mv up ~/.local/bin/ export PATH="$HOME/.local/bin:$PATH" ``` -Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, or -equivalent) to make it permanent. +Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`) to make it permanent. Verify the installation: @@ -66,38 +65,35 @@ up version ## Create the project -### Create the project directory +### Initialize the project + +Scaffold a new project with `up project init`. 
This creates the +`english-controller/` directory with a valid `upbound.yaml` and the standard +project layout (`apis/`, `functions/`, `examples/`, `tests/`): ```bash -mkdir english-controller +up project init --scratch english-controller cd english-controller ``` All commands from this point run from inside the `english-controller` directory. -### Create the project manifest +### Add function dependencies + +The controller uses two Crossplane functions: `function-auto-ready` so the +`WatchOperation` reports ready status, and `function-openai` to call the LLM. +Add them as project dependencies: ```bash -cat > upbound.yaml <<'EOF' -apiVersion: meta.dev.upbound.io/v2alpha1 -kind: Project -metadata: - name: english-controller -spec: - dependsOn: - - apiVersion: pkg.crossplane.io/v1 - kind: Function - package: xpkg.upbound.io/crossplane-contrib/function-auto-ready - version: '>=v0.0.0' - - apiVersion: pkg.crossplane.io/v1 - kind: Function - package: xpkg.upbound.io/upbound/function-openai - version: v0.3.0 - description: A Kubernetes controller whose enforcement logic is written in plain English. -EOF +up dependency add 'xpkg.upbound.io/crossplane-contrib/function-auto-ready' +up dependency add 'xpkg.upbound.io/upbound/function-openai:v0.3.0' ``` +`up dependency add` records each dependency in `upbound.yaml`. + + ### Create the WatchOperation + The `WatchOperation` is the controller. It watches the nginx `Deployment` and calls `upbound-function-openai` whenever it changes. The function sends the @@ -148,14 +144,14 @@ EOF ``` :::info -The explicit output instructions in `userPrompt` are needed for `qwen2.5:1.5b`. -With a larger model like `gpt-4o`, the `systemPrompt` can be much simpler — just -the rule itself, without format guidance. +The explicit output instructions in `userPrompt` are necessary for `qwen3.5:latest`. +With a larger model like `gpt-4o`, the `systemPrompt` can contain just the rule +itself, without format guidance. 
::: ### Create the nginx deployment -Create the starting state — 1 replica. The AI controller will correct this. +Create the starting state of 1 replica. The AI controller corrects this. ```bash mkdir -p examples @@ -182,11 +178,11 @@ spec: name: nginx EOF ``` - + ## Set up Ollama Ollama runs the LLM locally. Install it and pull the model before starting the -cluster — the model is ~1 GB. +cluster. The model is ~1 GB. ### Install Ollama @@ -198,10 +194,12 @@ If the install script doesn't work for your OS, download directly from [ollama.com/download][ollama-download]. ### Start Ollama - -On Linux, the install script registers a systemd service that starts Ollama + +On Linux, the install script registers a `systemd` service that starts Ollama automatically. On macOS, start it manually in a separate terminal if `ollama list` returns "could not connect to ollama server": + + ```shell ollama serve @@ -210,7 +208,7 @@ ollama serve ### Pull the model ```shell -ollama pull qwen2.5:1.5b +ollama pull qwen3.5:latest ``` Confirm the model downloaded: @@ -219,7 +217,7 @@ Confirm the model downloaded: ollama list ``` -You should see `qwen2.5:1.5b` in the output. +You should see `qwen3.5:latest` in the output. ## Start the project @@ -233,21 +231,8 @@ This creates a kind cluster, installs UXP, and deploys the function packages declared in `upbound.yaml`. It exits when the cluster is ready. :::warning -If `up project run --local` exits non-zero and prints `traces export: context -deadline exceeded`, check whether functions were installed: - -```bash -kubectl get functions -``` - -If functions appear, provisioning succeeded despite the telemetry error. -If the list is empty, delete the cluster and retry: - -```bash -kind delete cluster --name up-english-controller -``` - -Verify your network allows outbound connections to `xpkg.upbound.io` on port 443. +`up project run --local` may print `traces export: context deadline exceeded`. 
+This message reports a telemetry timeout and doesn't affect the cluster setup. ::: ### Configure kubectl @@ -279,19 +264,12 @@ kubectl get nodes The kind cluster's pods need to reach Ollama running on your host. Create a Kubernetes `Service` and `Endpoints` that route cluster traffic to your machine. -1. Get the host IP on the kind bridge network: - - **Linux:** +1. Get the host's IPv4 address as seen from inside the cluster. This command + works on Linux, macOS, and Windows: ```bash - HOST_IP=$(docker network inspect kind -f '{{range .IPAM.Config}}{{.Gateway}}{{end}}') - echo "Host IP: $HOST_IP" - ``` - - **macOS (Docker Desktop):** - - ```bash - HOST_IP=$(docker run --rm alpine sh -c 'getent hosts host.docker.internal' 2>/dev/null | awk '{print $1}') + HOST_IP=$(docker run --rm --add-host=host.docker.internal:host-gateway alpine \ + getent hosts host.docker.internal | awk '$1 ~ /^[0-9.]+$/ {print $1; exit}') echo "Host IP: $HOST_IP" ``` @@ -336,7 +314,7 @@ Kubernetes `Service` and `Endpoints` that route cluster traffic to your machine. stringData: OPENAI_API_KEY: ollama OPENAI_BASE_URL: http://${HOST_IP}:11434/v1 - OPENAI_MODEL: qwen2.5:1.5b + OPENAI_MODEL: qwen3.5:latest EOF ``` @@ -357,7 +335,7 @@ Wait until `upbound-function-openai` shows `HEALTHY: True`. :::warning If `kubectl get functions` returns **No resources found**, `up project run ---local` did not complete successfully. Delete the cluster with +--local` didn't complete. Delete the cluster with `kind delete cluster --name up-english-controller` and restart from [Start the project](#start-the-project). ::: @@ -391,10 +369,12 @@ kubectl get deployment nginx `READY 1/1` is the starting point. + ### Apply the WatchOperation + Crossplane Operations are Kubernetes objects that run logic against your cluster -on a trigger. There are three kinds: +on a trigger. | Kind | Trigger | |------|---------| @@ -417,7 +397,7 @@ The `WatchOperation` fires immediately because the `Deployment` already exists. 
kubectl get deployment nginx -w ``` -Within 60–90 seconds, replicas jump from 1 to 3. The LLM read the `Deployment`, +Within 60 to 90 seconds, replicas jump from 1 to 3. The LLM read the `Deployment`, decided it violated the rule, and patched it. Press Ctrl+C when replicas reach 3. @@ -443,9 +423,7 @@ controller applied. ## Watch it heal The `WatchOperation` re-evaluates on every change. If anything modifies the -`Deployment` — a human, a CI pipeline, a rollout — the rule re-applies. This is -drift detection with reasoning: not just "was this field changed" but "does this -still satisfy the intent?" +`Deployment`, the rule re-applies. ### Scale down nginx @@ -459,7 +437,7 @@ kubectl scale deployment nginx --replicas=1 kubectl get deployment nginx -w ``` -Within 30–60 seconds, replicas climb back to 3. The `WatchOperation` fired +Within 30 to 60 seconds, replicas climb back to 3. The `WatchOperation` fired because the `Deployment` changed. The LLM saw 1 replica, decided it violated the rule, and patched it. @@ -482,14 +460,13 @@ kubectl get secret gpt -n crossplane-system -o yaml ``` `OPENAI_BASE_URL` points to Ollama's OpenAI-compatible API running locally on -your machine — no data leaves the machine. Change that URL to +your machine, so no data leaves the machine. Change that URL to `https://api.openai.com/v1` and update `OPENAI_MODEL`, and the `WatchOperation` works identically. ## Change the rules -The enforcement logic is a text field. To change the policy, edit `systemPrompt` -and re-apply. No code change. No build pipeline. No rollout. +To change the policy, edit `systemPrompt` and re-apply. ### Update the minimum replicas to 5 @@ -523,8 +500,8 @@ sed -i 's/less than 3, set it to 3/less than 5, set it to 5/' \ ``` :::info -With `qwen2.5:1.5b`, keep the full `userPrompt` output instructions in place. -The explicit YAML template keeps the small model's output reliable. With a +With `qwen3.5:latest`, keep the full `userPrompt` output instructions in place. 
+The explicit YAML template keeps the local model's output reliable. With a larger model like `gpt-4o`, you can remove the `userPrompt` entirely and keep only the rule in `systemPrompt`. ::: @@ -549,7 +526,7 @@ Watch the updated rule enforce 5 replicas: kubectl get deployment nginx -w ``` -This takes 30–45 seconds. Press Ctrl+C when you see 5 ready replicas. +This takes 30 to 45 seconds. Press Ctrl+C when you see 5 ready replicas. ### Verify @@ -558,8 +535,6 @@ kubectl get watchoperations kubectl get operations ``` -Same architecture, different policy — changed by editing a text field. - :::tip Try adding a conditional rule to the `systemPrompt`: @@ -569,8 +544,7 @@ Otherwise, require at least 2. ``` The model interprets natural language conditions the same way it interprets -simple numeric rules. Any platform engineer can read the rule, change it, and -version it in Git — without writing Go. +numeric rules. ::: ## Clean up @@ -600,11 +574,11 @@ In this tutorial, you: Continue with: -- [WatchOperations reference][watchops-ref] — triggers, concurrency, history limits, and output handling -- [CronOperations reference][cronops-ref] — schedule-driven operations -- [Composition functions][fn-docs] — build custom logic for any resource -- [Provider authentication][auth-docs] — connect providers to your own cloud account -- [Upbound Marketplace][marketplace] — functions and providers for AWS, Azure, GCP, and more +- [WatchOperations reference][watchops-ref]: triggers, concurrency, history limits, and output handling +- [CronOperations reference][cronops-ref]: schedule-driven operations +- [Composition functions][fn-docs]: build custom logic for any resource +- [Provider authentication][auth-docs]: connect providers to your own cloud account +- [Upbound Marketplace][marketplace]: functions and providers for AWS, Azure, GCP, and more [docker-install]: https://docs.docker.com/get-docker/ [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ diff --git 
a/docs/getstarted/ai-database-scaling-tutorial.md b/docs/getstarted/ai-database-scaling-tutorial.md index 9d4ea463..60bc1815 100644 --- a/docs/getstarted/ai-database-scaling-tutorial.md +++ b/docs/getstarted/ai-database-scaling-tutorial.md @@ -1,6 +1,6 @@ --- title: AI-driven database scaling with Crossplane -description: Deploy an AI controller that reads live RDS metrics and scales a database automatically — no Go, no custom operator, just a YAML CronOperation and a plain-English system prompt. +description: Deploy an AI controller that reads live RDS metrics and scales a database automatically. weight: {weight} validation: type: walkthrough @@ -18,8 +18,6 @@ A `CronOperation` runs every minute. It reads live CloudWatch metrics from the database object, calls Claude, and decides whether to scale. If it scales, it writes its reasoning back to the object as an annotation. -No Go. No custom operator. No build pipeline. The controller is a single YAML file. - By the end of this tutorial, you can: - See live CloudWatch metrics surfaced directly on a Crossplane `SQLInstance` object @@ -50,7 +48,7 @@ mkdir -p ~/.local/bin && mv up ~/.local/bin/ export PATH="$HOME/.local/bin:$PATH" ``` -Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, etc) to make it permanent. +Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`) to make it permanent. ### Install mysqlslap @@ -110,26 +108,12 @@ This command: - Installs an ingress controller for the UXP console Startup takes several minutes. The command exits when the cluster is ready. - -:::warning -If `up project run --local` prints `traces export: context deadline exceeded` -in stderr, check the provider status: - -```bash -kubectl get providers -``` - -If providers appear, provisioning succeeded despite the telemetry error. 
-If the list is empty, delete the cluster and retry: -```bash -kind delete cluster --name up-$(basename "$PWD") -``` - -Verify your network allows outbound connections to `xpkg.upbound.io` on port 443. +:::warning +`up project run --local` may print `traces export: context deadline exceeded`. +This message reports a telemetry timeout and doesn't affect the cluster setup. ::: - ### Configure kubectl In your second terminal, point kubectl at the new cluster. `up project run --local` @@ -205,8 +189,8 @@ All four should show `HEALTHY: True` before continuing. :::warning If `kubectl get providers` or `kubectl get functions` returns **No resources found**, -`up project run --local` didn't complete successfully. Delete the cluster and -restart from [Start the project](#start-the-project). +`up project run --local` didn't complete. Delete the cluster and restart from +[Start the project](#start-the-project). ::: ### Apply the ProviderConfig @@ -227,7 +211,7 @@ Wait for the network composite resource to become ready (~5 minutes): kubectl get network rds-metrics-database-ai-scale -n database-team -w ``` -Wait until `READY: True`. Press Ctrl+C when it does. +Press Ctrl+C once it shows `READY: True`. ### Provision the database @@ -241,17 +225,13 @@ RDS provisioning takes 10 to 15 minutes. Watch the status: kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team -w ``` -Wait until `READY: True` before continuing. Press Ctrl+C when it does. +Press Ctrl+C once it shows `READY: True` before continuing. - - :::info -While waiting, the `function-rds-metrics` composition step is already +While you wait, the `function-rds-metrics` composition step is already collecting CloudWatch data and writing it onto the object. By the time the -database is ready, `status.performanceMetrics` will have live data. +database is ready, `status.performanceMetrics` contains live data. ::: - - ### Access the UXP console @@ -289,24 +269,19 @@ the resources Crossplane provisioned. 
### Verify the AWS resource -In the [AWS Console, RDS in `us-west-2`][aws-rds] find +In the [AWS Console, RDS in `us-east-1`][aws-rds], find `rds-metrics-database-ai-mysql`. -The Kubernetes object and the AWS resource are the same thing. Crossplane is -the bridge. - ### Find the performance metrics ```bash kubectl describe sqlinstance rds-metrics-database-ai-mysql -n database-team ``` -Find the `status.performanceMetrics` block. This block contains live CloudWatch data like CPU -utilization, active connections, free storage. `function-rds-metrics` collects -this data and writes it into the object. - -The AI can only access this context. It never touches CloudWatch directly. The -control plane is the authoritative source of state for both humans and the AI. +Find the `status.performanceMetrics` block. This block contains live +CloudWatch data such as CPU utilization, active connections, and free storage. +`function-rds-metrics` collects this data and writes it into the object. The +AI reads only this block and never queries CloudWatch directly. Or fetch just the metrics: @@ -319,10 +294,8 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ Open `operations/rds-intelligent-scaling-cron/operation.yaml` in your editor. - That file is the entire scaling controller. The `systemPrompt` defines the -scaling logic like thresholds, instance class progression, cooldown. - +scaling logic, including thresholds, instance class progression, and cooldown. ### Apply the controller @@ -336,7 +309,7 @@ kubectl apply -f operations/rds-intelligent-scaling-cron/operation.yaml kubectl get cronoperation ``` -It takes 30–45 seconds to start. Once running, watch for the first operation: +The `CronOperation` takes 30 to 45 seconds to start. 
Once it's running, watch for the first operation: ```bash kubectl get operations -w @@ -348,8 +321,7 @@ Wait until an operation shows `SUCCEEDED: True`, then press Ctrl+C and describe kubectl describe operation ``` -Look at the `Events` section. That's the AI's output — its reasoning about -whether to scale, and what it decided. +The `Events` section shows the AI's reasoning and decision. Then check the annotation written back to the database object: @@ -358,17 +330,14 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ -o jsonpath='{.metadata.annotations}' | jq . ``` -The AI's reasoning is on the object. Not a black box — the control plane is the -system of record for every decision the AI made. - In the UXP console, navigate to `rds-metrics-database-ai-mysql` and open the -**YAML** tab. You'll see the `intelligent-scaling/last-scaled-decision` -annotation with the model's last decision. +**YAML** tab. The `intelligent-scaling/last-scaled-decision` annotation +contains the model's last decision. -## Read the room +## Watch the controller idle -The `CronOperation` runs every minute. CPU is low right now. Watch what the AI -decides when there's nothing to do — and understand exactly what it sees. +The `CronOperation` runs every minute. CPU is low, so watch what the AI decides +when there's nothing to do. ### Watch operations fire @@ -389,9 +358,9 @@ Pick one of the operation names and describe it: kubectl describe operation ``` -Look at the `Events` section. At low CPU, the AI should decide to hold. The -cooldown logic is also in the prompt — it won't flip the instance class every -minute even if thresholds are crossed. +Look at the `Events` section. At low CPU, the AI decides to hold. The cooldown +logic is also in the prompt, so it doesn't flip the instance class every minute +even if thresholds are crossed. 
### See the current metrics @@ -400,7 +369,7 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ -o jsonpath='{.status.performanceMetrics}' | jq . ``` -This is exactly what the AI sees before making a decision. Live data, on the object. +This is the same data the AI reads before making a decision. ### See the current instance class @@ -409,15 +378,15 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ -o jsonpath='{.spec.parameters.instanceClass}' ``` -It's `db.t3.micro`. That's about to change. +It's `db.t3.micro`. You can also confirm the current instance type in the [AWS Console, RDS in -us-west-2][aws-rds]. +us-east-1][aws-rds]. ## Trigger a scale -Time to put the controller under pressure. A load test drives CPU above the -scaling threshold and the AI decides to act. +Run a load test that drives CPU above the scaling threshold so the AI decides +to act. ### Confirm the starting instance class @@ -436,8 +405,8 @@ In a second terminal, run the load test from inside the `demo` directory: bash perf-scale-demo.sh ``` -This hammers the database with CPU-intensive queries. The script takes 5–10 -minutes. If it finishes without triggering a scale, run it again. +The script sends CPU-intensive queries to the database for 5 to 10 minutes. +If it finishes without triggering a scale, run it again. ### Watch CPU climb @@ -456,7 +425,7 @@ Press Ctrl+C to exit the watch command, then: kubectl get operations -w ``` -When CPU crosses the threshold (~60%), the next `CronOperation` will decide to +When CPU crosses the threshold (~60%), the next `CronOperation` decides to scale up. Press Ctrl+C once you see a new operation start. ### See the scale event @@ -475,12 +444,9 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ -o jsonpath='{.metadata.annotations.intelligent-scaling/last-scaled-decision}' ``` -In the [AWS Console, RDS in us-west-2][aws-rds], refresh the database list. 
-The instance class change is in progress — RDS is modifying the live database. -No Terraform. No manual AWS operation. The platform handled it. - -The AI read the metrics, crossed the threshold, picked the next instance class, -and wrote its reasoning to the object. The control plane made the call. +In the [AWS Console, RDS in us-east-1][aws-rds], refresh the database list. +The instance class change is in progress, and RDS is modifying the live +database. ## Clean up @@ -492,7 +458,7 @@ kubectl delete sqlinstance rds-metrics-database-ai-mysql -n database-team kubectl delete network rds-metrics-database-ai-scale -n database-team ``` -RDS deletion takes 5–10 minutes. Wait until the `sqlinstance` is fully removed: +RDS deletion takes 5 to 10 minutes. Wait until the `sqlinstance` is fully removed: ```bash kubectl get sqlinstance -n database-team -w @@ -505,7 +471,7 @@ kubectl delete cronoperation rds-intelligent-scaling-cron kubectl delete operations --all ``` -Stop `up project run` with Ctrl+C in that terminal, then delete the cluster: +Delete the cluster: ```bash CLUSTER_NAME=$(kind get clusters | grep "^up-" | head -1) @@ -524,17 +490,17 @@ In this tutorial, you: Continue with: -- [CronOperations reference][cronops-ref] — schedules, history limits, concurrency -- [WatchOperations reference][watchops-ref] — event-driven operations -- [Composition functions][fn-docs] — build custom logic for any resource -- [Provider authentication][auth-docs] — connect providers to your own cloud account -- [Upbound Marketplace][marketplace] — providers and functions for AWS, Azure, GCP, and more +- [CronOperations reference][cronops-ref]: schedules, history limits, concurrency +- [WatchOperations reference][watchops-ref]: event-driven operations +- [Composition functions][fn-docs]: build custom logic for any resource +- [Provider authentication][auth-docs]: connect providers to your own cloud account +- [Upbound Marketplace][marketplace]: providers and functions for AWS, Azure, 
GCP, and more [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ [aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html [kind]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation [anthropic-console]: https://console.anthropic.com/ -[aws-rds]: https://us-west-2.console.aws.amazon.com/rds/home?region=us-west-2#databases: +[aws-rds]: https://us-east-1.console.aws.amazon.com/rds/home?region=us-east-1#databases: [cronops-ref]: /manuals/crossplane/operations/cron/ [watchops-ref]: /manuals/crossplane/operations/watch/ [fn-docs]: /manuals/cli/howtos/compositions/ diff --git a/docs/getstarted/platform-tutorial.md b/docs/getstarted/platform-tutorial.md index 144db5ad..ba5678e0 100644 --- a/docs/getstarted/platform-tutorial.md +++ b/docs/getstarted/platform-tutorial.md @@ -1,6 +1,6 @@ --- title: Build a platform with Upbound -description: Deploy a real app with a cloud database, observe drift detection, enforce policies, and change infrastructure live — all from a single control plane. +description: Deploy a real app with a cloud database, observe drift detection, enforce policies, and change infrastructure live, all from a single control plane. weight: {weight} validation: type: walkthrough @@ -12,9 +12,9 @@ validation: AWS_SECRET_ACCESS_KEY: "" --- -In this tutorial, you deploy an application with a PostgreSQL database on AWS, -watch Crossplane self-heal a manually changed resource, enforce a security -policy, and change live infrastructure — all by updating YAML files. +In this tutorial, you deploy an application with a PostgreSQL database on AWS. +You use Upbound Crossplane to manage resources, enforce security policy, and +change infrastructure. By the end of this tutorial, you can: @@ -51,66 +51,38 @@ mkdir -p ~/.local/bin && mv up ~/.local/bin/ export PATH="$HOME/.local/bin:$PATH" ``` -Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`, or -equivalent) to make it permanent. 
+Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`) to make it permanent. ## Set up the project -### Create the project directory +### Initialize the project + +Scaffold a new project with `up project init`. This creates the `app-w-db/` +directory with a valid `upbound.yaml` and the standard project layout +(`apis/`, `functions/`, `examples/`, `tests/`): ```bash -mkdir platform-demo -cd platform-demo +up project init --scratch app-w-db +cd app-w-db ``` -All commands from this point run from inside `platform-demo`. +All commands from this point run from inside the `app-w-db` directory. -### Create the project manifest +### Add provider and function dependencies -The `upbound.yaml` file declares the project name and its provider and function -dependencies. `up project run --local` reads this file to install packages into -the cluster. +The platform composes AWS resources and uses `function-auto-ready` so composite +resources report ready status. Add them as project dependencies: ```bash -cat > upbound.yaml <<'EOF' -apiVersion: meta.dev.upbound.io/v2alpha1 -kind: Project -metadata: - name: app-w-db -spec: - repository: xpkg.upbound.io/upbound/app-w-db - apiDependencies: - - k8s: - version: v1.33.0 - type: k8s - dependsOn: - - apiVersion: pkg.crossplane.io/v1 - kind: Provider - package: xpkg.upbound.io/upbound/provider-family-aws - version: v2.4.0 - - apiVersion: pkg.crossplane.io/v1 - kind: Provider - package: xpkg.upbound.io/upbound/provider-aws-iam - version: v2.4.0 - - apiVersion: pkg.crossplane.io/v1 - kind: Provider - package: xpkg.upbound.io/upbound/provider-aws-rds - version: v2.4.0 - - apiVersion: pkg.crossplane.io/v1 - kind: Provider - package: xpkg.upbound.io/upbound/provider-aws-ec2 - version: v2.4.0 - - apiVersion: pkg.crossplane.io/v1beta1 - kind: Function - package: xpkg.upbound.io/crossplane-contrib/function-auto-ready - version: v0.6.1 - description: A Crossplane composition that provisions a web application with a - managed database 
(RDS), networking (VPC/Subnets), IAM role, and a Kubernetes Deployment. - license: Apache-2.0 - maintainer: Upbound User -EOF +up dependency add 'xpkg.upbound.io/upbound/provider-family-aws:v2.4.0' +up dependency add 'xpkg.upbound.io/upbound/provider-aws-iam:v2.4.0' +up dependency add 'xpkg.upbound.io/upbound/provider-aws-rds:v2.4.0' +up dependency add 'xpkg.upbound.io/upbound/provider-aws-ec2:v2.4.0' +up dependency add 'xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.6.1' ``` +`up dependency add` records each dependency in `upbound.yaml`. + ### Define the platform APIs The platform exposes two APIs: `AppWDB` (a basic app with a database) and @@ -163,7 +135,7 @@ spec: description: RDS instance class region: type: string - default: eu-central-1 + default: us-east-1 description: AWS region required: - parameters @@ -223,7 +195,7 @@ spec: description: RDS instance class region: type: string - default: eu-central-1 + default: us-east-1 description: AWS region securityContext: type: object @@ -258,7 +230,7 @@ version = "0.1.0" EOF ``` -Create `main.k`. This file is the entire composition logic — it reads the +Create `main.k`. This file is the entire composition logic. 
It reads the composite resource and outputs every managed resource Crossplane creates: ```bash @@ -268,7 +240,7 @@ ocds = option("params").ocds params = oxr.spec.parameters appName = oxr.metadata.name -region = params.region or "eu-central-1" +region = params.region or "us-east-1" dbSize = params.dbSize or "db.t3.micro" replicas = params.replicas or 2 @@ -408,7 +380,7 @@ EOF ### Create example manifests -Create the base example and change variants for later steps: +Create the base example and the variants used in later steps: ```bash mkdir -p examples/appwdb @@ -422,7 +394,7 @@ spec: parameters: replicas: 2 dbSize: db.t3.micro - region: eu-central-1 + region: us-east-1 EOF cat > examples/appwdb/variant-bigger-db.yaml <<'EOF' @@ -435,7 +407,7 @@ spec: parameters: replicas: 2 dbSize: db.t3.medium - region: eu-central-1 + region: us-east-1 EOF cat > examples/appwdb/variant-more-replicas.yaml <<'EOF' @@ -448,7 +420,7 @@ spec: parameters: replicas: 5 dbSize: db.t3.micro - region: eu-central-1 + region: us-east-1 EOF ``` @@ -466,7 +438,7 @@ spec: parameters: replicas: 2 dbSize: db.t3.micro - region: eu-central-1 + region: us-east-1 securityContext: privileged: true EOF @@ -481,7 +453,7 @@ spec: parameters: replicas: 2 dbSize: db.t3.micro - region: eu-central-1 + region: us-east-1 securityContext: privileged: false EOF @@ -521,7 +493,7 @@ export AWS_SECRET_ACCESS_KEY= ## Start the project -Open a dedicated terminal window and run from inside `platform-demo`: +Open a dedicated terminal window and run from inside `app-w-db`: ```bash up project run --local --ingress @@ -539,14 +511,8 @@ This command: Startup takes several minutes. Keep this terminal open throughout the tutorial. :::warning -`up project run --local` may print `traces export: context deadline exceeded` -in stderr. This is a non-fatal telemetry export error. Check whether providers -were installed with `kubectl get providers`. If providers appear, continue. 
- -If `up project run --local` exits non-zero AND `kubectl get providers` returns -**No resources found**, provisioning failed. Run -`kind delete cluster --name up-app-w-db` and restart. Verify your network -allows outbound connections to `xpkg.upbound.io` on port 443. +`up project run --local` may print `traces export: context deadline exceeded`. +This message reports a telemetry timeout and doesn't affect the cluster setup. ::: ### Configure kubectl @@ -594,13 +560,12 @@ Check that providers are installed and healthy: kubectl get providers ``` -All four providers should show `HEALTHY: True`. Keep running this until they do -before continuing. +Wait until all four providers show `HEALTHY: True` before continuing. :::warning -If this returns **No resources found**, `up project run --local` did not -complete successfully. Delete the cluster with -`kind delete cluster --name up-app-w-db` and restart. +If this returns **No resources found**, `up project run --local` didn't +complete. Delete the cluster with `kind delete cluster --name up-app-w-db` and +restart. ::: Check that the composition function is healthy: @@ -612,11 +577,11 @@ kubectl get functions The KCL function should show `HEALTHY: True`. :::warning -If this returns **No resources found**, the KCL function was not built or +If this returns **No resources found**, the KCL function wasn't built or deployed. Check the `up project run` terminal and restart. ::: -### Apply the Compositions +### Apply the compositions Get the exact function name assigned by `up project run`: @@ -698,7 +663,7 @@ kubectl apply -f setup/config/ ``` :::info -AWS resource provisioning — especially RDS — takes 5–8 minutes. Each section +AWS resource provisioning takes 5 to 8 minutes, especially for RDS. Each section of this tutorial is structured so you can keep reading while AWS works. ::: @@ -722,8 +687,8 @@ viewing resource relationship graphs, and checking sync status. 3. Open `http://localhost:8080` in your browser. 
The console shows every composite resource, the tree of composed resources it -manages, and their sync status. You'll use it throughout this tutorial to -complement `kubectl` output. +manages, and their sync status. Use it throughout this tutorial to complement +`kubectl` output. ## Deploy an app with a database @@ -751,18 +716,14 @@ subnets, IAM role, and RDS configuration are handled by the platform. Those 10 lines create: -- VPC + 3 subnets (eu-central-1a, b, c) +- VPC + 3 subnets (us-east-1a, b, c) - RDS subnet group + PostgreSQL instance (gp3 storage) - IAM role - Kubernetes `Deployment` scaled to `replicas: 2` -The platform end user never sees any of that. They requested `AppWDB`. The -platform decided what that means. - -Open the AWS Console and set your region to **eu-central-1**. Check: -- **IAM → Roles** — look for `demo-01-role` -- **VPC → Your VPCs** — look for `demo-01-vpc` -- **RDS → Databases** — watch for `demo-01-db` (~5–8 minutes) +Open the AWS Console and set your region to **us-east-1**. Look for +`demo-01-role` under **IAM → Roles**, `demo-01-vpc` under **VPC → Your VPCs**, +and `demo-01-db` under **RDS → Databases** (about 5 to 8 minutes). In the UXP console, click into `demo-01` and open the **relationship view** to see all composed resources and their sync status. @@ -771,13 +732,13 @@ see all composed resources and their sync status. Open `apis/appwdb/definition.yaml`. -This is the XRD — the API your end users interact with. The `dbSize` field is +This is the XRD, the API your end users interact with. The `dbSize` field is an enum, not a free-text field. Users can't request a size the platform doesn't support. Open `apis/appwdb/composition.yaml`. -This is the Composition — the mapping from those 10 lines to all the AWS +This is the Composition, the mapping from those 10 lines to all the AWS resources. It calls the KCL function you created. 
You can also write Composition functions in [Go][fn-go], [Python][fn-python], or [Go Templating][fn-go-template], and mix languages within a single pipeline. @@ -786,25 +747,25 @@ Open `functions/compose-resources/main.k`. This is the logic layer. It reads `dbSize` and `replicas` from the composite resource and outputs every managed resource Crossplane creates. The platform -team owns and maintains this file — end users never touch it. +team owns and maintains this file. End users never touch it. ## Explore the control plane A **control plane** is software that continuously watches desired state and -reconciles actual state to match it — not once, but always. Crossplane turns a -Kubernetes cluster into a control plane for all infrastructure and applications. +reconciles actual state to match it. Crossplane turns a Kubernetes cluster +into a control plane for all infrastructure and applications. **Composite Resources** are the custom APIs your platform exposes. The file you applied in `examples/appwdb/example.yaml` is a Composite Resource. Instead of giving end users raw AWS access, the platform team defines higher-level -abstractions like `AppWDB` — and end users request those. +abstractions like `AppWDB`, and end users request those. ### Providers and ProviderConfigs **Providers** are how Crossplane talks to external systems like AWS. Each -provider is a Kubernetes controller that manages a specific service — EC2, RDS, -IAM, and so on. In Crossplane 2.0, the Kubernetes `Deployment` for your app is -composed natively — no separate Kubernetes provider needed. +provider is a Kubernetes controller that manages a specific service such as +EC2, RDS, or IAM. In Crossplane 2.0, the Kubernetes `Deployment` for your app +is composed natively, with no separate Kubernetes provider needed. **ProviderConfigs** tell providers how to authenticate. 
This demo uses a `Secret`-based `ProviderConfig`, but each provider supports multiple @@ -838,9 +799,9 @@ all 8 composed resources, their sync status, and how they connect. ## Drift detection -Crossplane never stops watching. If someone changes a resource directly in AWS, -Crossplane detects the difference between desired state and actual state and -corrects it. This is drift detection. +If someone changes a resource directly in AWS, Crossplane detects the +difference between desired state and actual state and corrects it. This is +drift detection. ### Trigger drift @@ -854,7 +815,7 @@ corrects it. This is drift detection. 2. In the AWS Console, go to **VPC → Your VPCs** and find `demo-01-vpc`. -3. Click the **Name** tag and change it to something else — for example, +3. Click the **Name** tag and change it to something else, such as `demo-01-vpc-hacked`. Refresh to confirm the change took effect. 4. Tell Crossplane to reconcile immediately instead of waiting for the next loop: @@ -874,8 +835,7 @@ corrects it. This is drift detection. 6. Switch to the AWS Console and watch the Name tag snap back to `demo-01-vpc`. -The control plane detected the drift and corrected it — not at the next CI run, -right now. +The control plane detected the drift and corrected it. ### Verify recovery @@ -888,7 +848,7 @@ kubectl get appwdb demo-01 -n demo ## Add policy enforcement Kyverno is a policy engine that intercepts Kubernetes admission requests before -they're accepted. A policy violation is blocked before Crossplane runs — +they're accepted. A policy violation is blocked before Crossplane runs, so nothing reaches AWS. ### Install Kyverno @@ -913,9 +873,9 @@ nothing reaches AWS. kubectl apply -f w-kyverno/addon-kyverno.yaml ``` -3. In the UXP console, select **AddOns** in the left navigation — you should - see `upbound-addon-kyverno` appear and become healthy (~2 minutes). Or watch - from the terminal: +3. In the UXP console, select **AddOns** in the left navigation. 
The + `upbound-addon-kyverno` entry appears and becomes healthy in about two + minutes. Or watch from the terminal: ```bash kubectl get addons.pkg.upbound.io upbound-addon-kyverno -w @@ -941,7 +901,7 @@ nothing reaches AWS. policies.kyverno.io/description: >- Privileged containers have unrestricted access to the host system. This policy blocks any AppWDBSecure request with securityContext.privileged: true - before Crossplane composes any resources — nothing reaches AWS. + before Crossplane composes any resources, so nothing reaches AWS. spec: validationFailureAction: Enforce background: false @@ -1015,11 +975,11 @@ Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. ``` The request is blocked immediately. The error references - `disallow-privileged-containers`. Nothing was created — Kyverno stopped it - before Crossplane even saw the request. + `disallow-privileged-containers`. Nothing was created. Kyverno stopped the + request before Crossplane saw it. - `demo-01` — deployed before Kyverno was installed — has a running RDS - instance right now. This one didn't even start. + `demo-01`, deployed before Kyverno was installed, has a running RDS + instance. This request didn't start one. ### Apply a compliant request @@ -1039,9 +999,8 @@ Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. ## Change it live -To change infrastructure, update the desired state. Crossplane figures out what -needs to change and does it — the same interface for a human, a GitOps pipeline, -or an AI agent. +To change infrastructure, update the desired state. Crossplane figures out +what needs to change and does it. ### Option A: Scale the database @@ -1093,21 +1052,21 @@ your updated values. ## Clean up Delete the composite resources. Crossplane deletes all composed AWS resources -before removing the composite resource. +before removing each composite resource. 
```shell kubectl delete appwdbsecure kyverno-demo-01 -n demo kubectl delete appwdb demo-01 -n demo ``` -RDS deletion takes 5–10 minutes. Wait until both are fully removed: +RDS deletion takes 5 to 10 minutes. Wait until both are fully removed: ```shell kubectl get appwdb -n demo -w kubectl get appwdbsecure -n demo -w ``` -Once both are gone, stop `up project run` with Ctrl+C, then delete the cluster: +Delete the cluster: ```shell kind delete cluster --name up-app-w-db @@ -1127,10 +1086,10 @@ In this tutorial, you: Continue with: -- [Composite Resource Definitions][xrd-concept] — design your own platform APIs -- [Composition functions][fn-docs] — write the logic that maps user requests to resources -- [Provider authentication][auth-docs] — connect providers to your own cloud account -- [Upbound Marketplace][marketplace] — providers and add-ons for AWS, Azure, GCP, and more +- [Composite Resource Definitions][xrd-concept]: design your own platform APIs +- [Composition functions][fn-docs]: write the logic that maps user requests to resources +- [Provider authentication][auth-docs]: connect providers to your own cloud account +- [Upbound Marketplace][marketplace]: providers and add-ons for AWS, Azure, GCP, and more [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ [up-cli-releases]: https://github.com/upbound/up/releases diff --git a/utils/vale/styles/Upbound/spelling-exceptions.txt b/utils/vale/styles/Upbound/spelling-exceptions.txt index c3932a47..06285da1 100644 --- a/utils/vale/styles/Upbound/spelling-exceptions.txt +++ b/utils/vale/styles/Upbound/spelling-exceptions.txt @@ -88,6 +88,10 @@ namespaces namespaced Netlify OAuth +Ollama +ollama's +Ollama's +ollama Okta OTEL overengineer From d46ef9300311075fa9014ad51d51a43187a15537 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 6 May 2026 13:47:26 -0400 Subject: [PATCH 09/12] update prereqs --- docs/getstarted/ai-controller-tutorial.md | 32 ++----------------- .../ai-database-scaling-tutorial.md | 18 
++--------- docs/getstarted/platform-tutorial.md | 27 ++-------------- 3 files changed, 7 insertions(+), 70 deletions(-) diff --git a/docs/getstarted/ai-controller-tutorial.md b/docs/getstarted/ai-controller-tutorial.md index 363d14ab..103dd2fc 100644 --- a/docs/getstarted/ai-controller-tutorial.md +++ b/docs/getstarted/ai-controller-tutorial.md @@ -33,40 +33,11 @@ Install the following before starting: - [Docker][docker-install], running locally - [`kubectl`][kubectl-install] - [`kind`][kind-install] +- [`up CLI`][up-cli] v0.44.3 or later -### Install the up CLI - -This tutorial requires up CLI v0.44.3. - -```shell -curl -sL "https://cli.upbound.io" | VERSION=v0.44.3 sh -``` - -Move the binary into your `PATH`: - -```shell -sudo mv up /usr/local/bin/ -``` - -If you don't have `sudo` access: - -```shell -mkdir -p ~/.local/bin && mv up ~/.local/bin/ -export PATH="$HOME/.local/bin:$PATH" -``` - -Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`) to make it permanent. - -Verify the installation: - -```shell -up version -``` ## Create the project -### Initialize the project - Scaffold a new project with `up project init`. 
This creates the `english-controller/` directory with a valid `upbound.yaml` and the standard project layout (`apis/`, `functions/`, `examples/`, `tests/`): @@ -580,6 +551,7 @@ Continue with: - [Provider authentication][auth-docs]: connect providers to your own cloud account - [Upbound Marketplace][marketplace]: functions and providers for AWS, Azure, GCP, and more +[up-cli]: /manuals/cli/overview/ [docker-install]: https://docs.docker.com/get-docker/ [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ [kind-install]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation diff --git a/docs/getstarted/ai-database-scaling-tutorial.md b/docs/getstarted/ai-database-scaling-tutorial.md index 60bc1815..51955a54 100644 --- a/docs/getstarted/ai-database-scaling-tutorial.md +++ b/docs/getstarted/ai-database-scaling-tutorial.md @@ -33,22 +33,7 @@ Install the following tools before starting: - [AWS CLI][aws-cli], configured with credentials that can create VPCs and RDS instances - [kind][kind] - An [Anthropic API key][anthropic-console] with access to Claude - -### Install the up CLI - -```shell -curl -sL "https://cli.upbound.io" | sh -sudo mv up /usr/local/bin/ -``` - -If you don't have `sudo` access: - -```shell -mkdir -p ~/.local/bin && mv up ~/.local/bin/ -export PATH="$HOME/.local/bin:$PATH" -``` - -Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`) to make it permanent. 
+- [`up CLI`][up-cli] v0.44.3 or later ### Install mysqlslap @@ -496,6 +481,7 @@ Continue with: - [Provider authentication][auth-docs]: connect providers to your own cloud account - [Upbound Marketplace][marketplace]: providers and functions for AWS, Azure, GCP, and more +[up-cli]: /manuals/cli/overview/ [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ [aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html [kind]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation diff --git a/docs/getstarted/platform-tutorial.md b/docs/getstarted/platform-tutorial.md index ba5678e0..61dfc339 100644 --- a/docs/getstarted/platform-tutorial.md +++ b/docs/getstarted/platform-tutorial.md @@ -31,31 +31,9 @@ Install the following tools before starting: - [`kubectl`][kubectl-install] - [AWS CLI][aws-cli], configured with credentials for an account where you can create VPCs, IAM roles, and RDS instances - [kind][kind] +- [`up CLI`][up-cli] v0.44.3 or later -### Install the up CLI - -```shell -curl -sL "https://cli.upbound.io" | sh -``` - -Move the binary into your `PATH`: - -```shell -sudo mv up /usr/local/bin/ -``` - -If you don't have `sudo` access: - -```shell -mkdir -p ~/.local/bin && mv up ~/.local/bin/ -export PATH="$HOME/.local/bin:$PATH" -``` - -Add the `export` line to your shell profile (`~/.bashrc`, `~/.zshrc`) to make it permanent. - -## Set up the project - -### Initialize the project +## Create the project Scaffold a new project with `up project init`. 
This creates the `app-w-db/` directory with a valid `upbound.yaml` and the standard project layout @@ -1091,6 +1069,7 @@ Continue with: - [Provider authentication][auth-docs]: connect providers to your own cloud account - [Upbound Marketplace][marketplace]: providers and add-ons for AWS, Azure, GCP, and more +[up-cli]: /manuals/cli/overview/ [kubectl-install]: https://kubernetes.io/docs/tasks/tools/ [up-cli-releases]: https://github.com/upbound/up/releases [aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html From a63ec2d9f01e7186e5ac66520db49b6a76844f4b Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 6 May 2026 13:57:09 -0400 Subject: [PATCH 10/12] update vale --- docs/getstarted/ai-controller-tutorial.md | 4 +- .../ai-database-scaling-tutorial.md | 22 +++--- docs/getstarted/platform-tutorial.md | 75 ++++++++++--------- .../styles/Upbound/spelling-exceptions.txt | 1 + 4 files changed, 52 insertions(+), 50 deletions(-) diff --git a/docs/getstarted/ai-controller-tutorial.md b/docs/getstarted/ai-controller-tutorial.md index 103dd2fc..6fbccf86 100644 --- a/docs/getstarted/ai-controller-tutorial.md +++ b/docs/getstarted/ai-controller-tutorial.md @@ -158,7 +158,7 @@ cluster. The model is ~1 GB. ### Install Ollama ```shell -curl -fsSL https://ollama.com/install.sh | sh +curl -fsSL "https://ollama.com/install.sh" | sh ``` If the install script doesn't work for your OS, download directly from @@ -230,7 +230,7 @@ Verify the connection: kubectl get nodes ``` -### Wire Ollama into the cluster +### Connect the host to the cluster The kind cluster's pods need to reach Ollama running on your host. Create a Kubernetes `Service` and `Endpoints` that route cluster traffic to your machine. 
diff --git a/docs/getstarted/ai-database-scaling-tutorial.md b/docs/getstarted/ai-database-scaling-tutorial.md index 51955a54..8603a7c3 100644 --- a/docs/getstarted/ai-database-scaling-tutorial.md +++ b/docs/getstarted/ai-database-scaling-tutorial.md @@ -248,9 +248,9 @@ kubectl get sqlinstance -n database-team You should see `rds-metrics-database-ai-mysql` with `READY: True`. That's a real AWS RDS instance, managed as a Kubernetes object. -In the UXP console, click **View all Composite Resources**. You'll see -`rds-metrics-database-ai-mysql` listed. Click **Relationship View** to see -the resources Crossplane provisioned. +In the UXP console, click **View all Composite Resources**. The +`rds-metrics-database-ai-mysql` entry appears in the list. Click +**Relationship View** to see the resources Crossplane provisioned. ### Verify the AWS resource @@ -324,13 +324,13 @@ contains the model's last decision. The `CronOperation` runs every minute. CPU is low, so watch what the AI decides when there's nothing to do. -### Watch operations fire +### Watch operations run ```bash kubectl get operations -w ``` -A new operation appears roughly every minute. Press Ctrl+C after a few have run. +A new operation appears every minute. Press Ctrl+C after several have run. In the UXP console, select **Operations** in the left navigation to see the same list visually. @@ -345,7 +345,7 @@ kubectl describe operation Look at the `Events` section. At low CPU, the AI decides to hold. The cooldown logic is also in the prompt, so it doesn't flip the instance class every minute -even if thresholds are crossed. +even if usage crosses the thresholds. ### See the current metrics @@ -354,7 +354,7 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ -o jsonpath='{.status.performanceMetrics}' | jq . ``` -This is the same data the AI reads before making a decision. +The AI reads this same data before making a decision. 
### See the current instance class @@ -366,7 +366,7 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ It's `db.t3.micro`. You can also confirm the current instance type in the [AWS Console, RDS in -us-east-1][aws-rds]. +`us-east-1`][aws-rds]. ## Trigger a scale @@ -393,7 +393,7 @@ bash perf-scale-demo.sh The script sends CPU-intensive queries to the database for 5 to 10 minutes. If it finishes without triggering a scale, run it again. -### Watch CPU climb +### Watch the metrics climb In your first terminal, watch the metrics update every 10 seconds: @@ -402,7 +402,7 @@ watch -n 10 "kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-t -o jsonpath='{.status.performanceMetrics.metrics}' | jq ." ``` -### Watch the controller fire +### Watch the controller act Press Ctrl+C to exit the watch command, then: @@ -429,7 +429,7 @@ kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ -o jsonpath='{.metadata.annotations.intelligent-scaling/last-scaled-decision}' ``` -In the [AWS Console, RDS in us-east-1][aws-rds], refresh the database list. +In the [AWS Console, RDS in `us-east-1`][aws-rds], refresh the database list. The instance class change is in progress, and RDS is modifying the live database. 
diff --git a/docs/getstarted/platform-tutorial.md b/docs/getstarted/platform-tutorial.md index 61dfc339..b3ce8ad0 100644 --- a/docs/getstarted/platform-tutorial.md +++ b/docs/getstarted/platform-tutorial.md @@ -532,7 +532,7 @@ kubectl get nodes ### Verify the setup -Check that providers are installed and healthy: +Check that all four providers report healthy: ```bash kubectl get providers @@ -619,7 +619,7 @@ kubectl apply -f apis/appwdb/composition.yaml kubectl apply -f apis/appwdbsecure/composition.yaml ``` -Verify the APIs are established: +Verify both XRDs reach the `Established` condition: ```bash kubectl get xrds @@ -641,8 +641,8 @@ kubectl apply -f setup/config/ ``` :::info -AWS resource provisioning takes 5 to 8 minutes, especially for RDS. Each section -of this tutorial is structured so you can keep reading while AWS works. +AWS resource provisioning takes 5 to 8 minutes for RDS. Each section of this +tutorial gives you something to read while AWS works. ::: ### Access the UXP console @@ -671,8 +671,8 @@ manages, and their sync status. Use it throughout this tutorial to complement ## Deploy an app with a database The end-user interface for this platform is a 10-line manifest. A developer -fills in three fields: replica count, database size, and AWS region. The VPC, -subnets, IAM role, and RDS configuration are handled by the platform. +fills in three fields: replica count, database size, and AWS region. The +platform handles the VPC, subnets, IAM role, and RDS configuration. 1. Apply the example manifest: @@ -694,12 +694,12 @@ subnets, IAM role, and RDS configuration are handled by the platform. Those 10 lines create: -- VPC + 3 subnets (us-east-1a, b, c) +- VPC + 3 subnets (`us-east-1a`, `us-east-1b`, `us-east-1c`) - RDS subnet group + PostgreSQL instance (gp3 storage) - IAM role - Kubernetes `Deployment` scaled to `replicas: 2` -Open the AWS Console and set your region to **us-east-1**. 
Look for +Open the AWS Console and set your region to **`us-east-1`**. Look for `demo-01-role` under **IAM → Roles**, `demo-01-vpc` under **VPC → Your VPCs**, and `demo-01-db` under **RDS → Databases** (about 5 to 8 minutes). @@ -710,22 +710,22 @@ see all composed resources and their sync status. Open `apis/appwdb/definition.yaml`. -This is the XRD, the API your end users interact with. The `dbSize` field is -an enum, not a free-text field. Users can't request a size the platform doesn't -support. +The XRD defines the API your end users interact with. The `dbSize` field is +an enum, not a free-text field, so users can't request a size the platform +doesn't support. Open `apis/appwdb/composition.yaml`. -This is the Composition, the mapping from those 10 lines to all the AWS -resources. It calls the KCL function you created. You can also write Composition -functions in [Go][fn-go], [Python][fn-python], or [Go Templating][fn-go-template], -and mix languages within a single pipeline. +The Composition maps those 10 lines to all the AWS resources. It calls the +KCL function you created. You can also write Composition functions in +[Go][fn-go], [Python][fn-python], or [Go Templating][fn-go-template], and mix +languages within a single pipeline. Open `functions/compose-resources/main.k`. -This is the logic layer. It reads `dbSize` and `replicas` from the composite -resource and outputs every managed resource Crossplane creates. The platform -team owns and maintains this file. End users never touch it. +The logic layer reads `dbSize` and `replicas` from the composite resource and +outputs every managed resource Crossplane creates. The platform team owns and +maintains this file. End users never edit it. ## Explore the control plane @@ -742,8 +742,9 @@ abstractions like `AppWDB`, and end users request those. **Providers** are how Crossplane talks to external systems like AWS. Each provider is a Kubernetes controller that manages a specific service such as -EC2, RDS, or IAM. 
In Crossplane 2.0, the Kubernetes `Deployment` for your app -is composed natively, with no separate Kubernetes provider needed. +EC2, RDS, or IAM. In Crossplane 2.0, Crossplane composes the Kubernetes +`Deployment` for your app natively, with no separate Kubernetes provider +needed. **ProviderConfigs** tell providers how to authenticate. This demo uses a `Secret`-based `ProviderConfig`, but each provider supports multiple @@ -778,12 +779,12 @@ all 8 composed resources, their sync status, and how they connect. ## Drift detection If someone changes a resource directly in AWS, Crossplane detects the -difference between desired state and actual state and corrects it. This is -drift detection. +difference between desired state and actual state and corrects it. Crossplane +calls this drift detection. ### Trigger drift -1. Verify the VPC is synced: +1. Verify the VPC reached `SYNCED: True`: ```bash kubectl get vpcs.ec2.aws.m.upbound.io demo-01-vpc -n demo @@ -826,10 +827,10 @@ kubectl get appwdb demo-01 -n demo ## Add policy enforcement Kyverno is a policy engine that intercepts Kubernetes admission requests before -they're accepted. A policy violation is blocked before Crossplane runs, so +they're accepted. Kyverno blocks a policy violation before Crossplane runs, so nothing reaches AWS. -### Install Kyverno +### Install the policy engine 1. Create the Kyverno add-on manifest: @@ -927,8 +928,8 @@ nothing reaches AWS. Warning: the kind defined in the all match resource is invalid: unable to convert GVK to GVR for kinds AppWDBSecure ``` - This is expected if the XRDs were recently established and doesn't prevent - the policy from enforcing once the CRD is ready. + You can ignore this warning if Crossplane recently created the XRDs. Once + the CRD is ready, the policy enforces. 6. Verify the policy is active: @@ -941,9 +942,9 @@ nothing reaches AWS. ### Block a privileged request :::warning -Kyverno can only evaluate requests for resource types whose CRDs are installed. 
-If you see `no matches for kind "AppWDBSecure"`, the XRD is not installed. -Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. +Kyverno can only check requests for resource types whose CRDs already exist in +the cluster. If you see `no matches for kind "AppWDBSecure"`, the XRD isn't +ready yet. Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. ::: 1. Try to apply a request with `privileged: true`: @@ -952,12 +953,12 @@ Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. kubectl apply -f examples/appwdbsecure/example-1.yaml ``` - The request is blocked immediately. The error references - `disallow-privileged-containers`. Nothing was created. Kyverno stopped the - request before Crossplane saw it. + Kyverno blocks the request immediately. The error references + `disallow-privileged-containers`. Crossplane never sees the request, so + nothing reaches AWS. - `demo-01`, deployed before Kyverno was installed, has a running RDS - instance. This request didn't start one. + `demo-01`, which you deployed before adding Kyverno, still has a running + RDS instance. This request didn't start one. ### Apply a compliant request @@ -980,7 +981,7 @@ Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. To change infrastructure, update the desired state. Crossplane figures out what needs to change and does it. -### Option A: Scale the database +### Scale the database 1. Apply the change: @@ -1003,7 +1004,7 @@ what needs to change and does it. kubectl get appwdb demo-01 -n demo ``` -### Option B: Scale replicas +### Scale the replicas 1. 
Apply the change: diff --git a/utils/vale/styles/Upbound/spelling-exceptions.txt b/utils/vale/styles/Upbound/spelling-exceptions.txt index 06285da1..1d141c7d 100644 --- a/utils/vale/styles/Upbound/spelling-exceptions.txt +++ b/utils/vale/styles/Upbound/spelling-exceptions.txt @@ -30,6 +30,7 @@ configmap Cognito Commonmark conformant +cooldown Config ConfigMap ConfigMaps From 4b00b33bb8502c2d4d1ee456f6e37bd1cb124ec5 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 6 May 2026 14:12:44 -0400 Subject: [PATCH 11/12] links --- docs/getstarted/ai-controller-tutorial.md | 8 ++++---- docs/getstarted/ai-database-scaling-tutorial.md | 6 +++--- docs/getstarted/platform-tutorial.md | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/getstarted/ai-controller-tutorial.md b/docs/getstarted/ai-controller-tutorial.md index 6fbccf86..944d391e 100644 --- a/docs/getstarted/ai-controller-tutorial.md +++ b/docs/getstarted/ai-controller-tutorial.md @@ -557,9 +557,9 @@ Continue with: [kind-install]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation [ollama-download]: https://ollama.com/download [up-cli-releases]: https://github.com/upbound/up/releases -[uxp-releases]: /reference/release-notes/ -[watchops-ref]: /manuals/crossplane/operations/watch/ -[cronops-ref]: /manuals/crossplane/operations/cron/ -[fn-docs]: /manuals/cli/howtos/compositions/ +[uxp-releases]: /reference/release-notes/uxp +[cronops-ref]: /manuals/uxp/concepts/operations/cron-operation/ +[watchops-ref]: /manuals/uxp/concepts/operations/watch-operation/ +[fn-docs]: /manuals/uxp/concepts/composition/ [auth-docs]: /manuals/packages/providers/authentication/ [marketplace]: https://marketplace.upbound.io/ diff --git a/docs/getstarted/ai-database-scaling-tutorial.md b/docs/getstarted/ai-database-scaling-tutorial.md index 8603a7c3..d3623923 100644 --- a/docs/getstarted/ai-database-scaling-tutorial.md +++ b/docs/getstarted/ai-database-scaling-tutorial.md @@ -487,8 +487,8 @@ Continue with: 
[kind]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation [anthropic-console]: https://console.anthropic.com/ [aws-rds]: https://us-east-1.console.aws.amazon.com/rds/home?region=us-east-1#databases: -[cronops-ref]: /manuals/crossplane/operations/cron/ -[watchops-ref]: /manuals/crossplane/operations/watch/ -[fn-docs]: /manuals/cli/howtos/compositions/ +[cronops-ref]: /manuals/uxp/concepts/operations/cron-operation/ +[watchops-ref]: /manuals/uxp/concepts/operations/watch-operation/ +[fn-docs]: /manuals/uxp/concepts/composition/ [auth-docs]: /manuals/packages/providers/authentication/ [marketplace]: https://marketplace.upbound.io/ diff --git a/docs/getstarted/platform-tutorial.md b/docs/getstarted/platform-tutorial.md index b3ce8ad0..d7d73461 100644 --- a/docs/getstarted/platform-tutorial.md +++ b/docs/getstarted/platform-tutorial.md @@ -1078,7 +1078,7 @@ Continue with: [fn-go]: /manuals/cli/howtos/compositions/go/ [fn-python]: /manuals/cli/howtos/compositions/python/ [fn-go-template]: /manuals/cli/howtos/compositions/go-template/ -[xrd-concept]: /manuals/packages/xrds/ -[fn-docs]: /manuals/cli/howtos/compositions/ +[xrd-concept]:/manuals/uxp/concepts/composition/composite-resource-definitions/ +[fn-docs]: /manuals/uxp/concepts/composition/overview/ [auth-docs]: /manuals/packages/providers/authentication/ [marketplace]: https://marketplace.upbound.io/ From d45249d464d997963ab29f047545673d7e345636 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 6 May 2026 14:29:24 -0400 Subject: [PATCH 12/12] update header organization --- docs/getstarted/ai-controller-tutorial.md | 289 ++++++++--------- .../ai-database-scaling-tutorial.md | 301 +++++++++--------- docs/getstarted/platform-tutorial.md | 57 +--- 3 files changed, 291 insertions(+), 356 deletions(-) diff --git a/docs/getstarted/ai-controller-tutorial.md b/docs/getstarted/ai-controller-tutorial.md index 944d391e..ddce85cb 100644 --- a/docs/getstarted/ai-controller-tutorial.md +++ 
b/docs/getstarted/ai-controller-tutorial.md @@ -49,8 +49,6 @@ cd english-controller All commands from this point run from inside the `english-controller` directory. -### Add function dependencies - The controller uses two Crossplane functions: `function-auto-ready` so the `WatchOperation` reports ready status, and `function-openai` to call the LLM. Add them as project dependencies: @@ -62,10 +60,6 @@ up dependency add 'xpkg.upbound.io/upbound/function-openai:v0.3.0' `up dependency add` records each dependency in `upbound.yaml`. - -### Create the WatchOperation - - The `WatchOperation` is the controller. It watches the nginx `Deployment` and calls `upbound-function-openai` whenever it changes. The function sends the current resource state to the LLM along with the `systemPrompt` rule. The LLM @@ -120,9 +114,8 @@ With a larger model like `gpt-4o`, the `systemPrompt` can contain just the rule itself, without format guidance. ::: -### Create the nginx deployment - -Create the starting state of 1 replica. The AI controller corrects this. +Create the starting nginx `Deployment` with 1 replica. The AI controller +corrects this. ```bash mkdir -p examples @@ -151,44 +144,44 @@ EOF ``` ## Set up Ollama + -Ollama runs the LLM locally. Install it and pull the model before starting the -cluster. The model is ~1 GB. +Ollama runs the LLM locally. Install it, start it, and pull the model before +starting the cluster. The model is ~1 GB. -### Install Ollama +1. Install Ollama: -```shell -curl -fsSL "https://ollama.com/install.sh" | sh -``` + ```shell + curl -fsSL "https://ollama.com/install.sh" | sh + ``` -If the install script doesn't work for your OS, download directly from -[ollama.com/download][ollama-download]. + If the install script doesn't work for your OS, download directly from + [ollama.com/download][ollama-download]. -### Start Ollama -On Linux, the install script registers a `systemd` service that starts Ollama -automatically. 
On macOS, start it manually in a separate terminal if -`ollama list` returns "could not connect to ollama server": - +2. Start Ollama. On Linux, the install script registers a `systemd` service + that starts Ollama automatically. On macOS, start it manually in a + separate terminal if `ollama list` returns "could not connect to ollama + server": -```shell -ollama serve -``` + ```shell + ollama serve + ``` -### Pull the model +3. Pull the model: -```shell -ollama pull qwen3.5:latest -``` + ```shell + ollama pull qwen3.5:latest + ``` -Confirm the model downloaded: +4. Confirm the model downloaded: -```shell -ollama list -``` + ```shell + ollama list + ``` -You should see `qwen3.5:latest` in the output. + You should see `qwen3.5:latest` in the output. ## Start the project @@ -206,7 +199,7 @@ declared in `upbound.yaml`. It exits when the cluster is ready. This message reports a telemetry timeout and doesn't affect the cluster setup. ::: -### Configure kubectl +Point kubectl at the new cluster: ```bash kind get kubeconfig --name up-english-controller > ~/.kube/config @@ -230,8 +223,6 @@ Verify the connection: kubectl get nodes ``` -### Connect the host to the cluster - The kind cluster's pods need to reach Ollama running on your host. Create a Kubernetes `Service` and `Endpoints` that route cluster traffic to your machine. @@ -294,8 +285,6 @@ Kubernetes `Service` and `Endpoints` that route cluster traffic to your machine. set `OPENAI_API_KEY` to your API key, and update `OPENAI_MODEL`. The `WatchOperation` works identically regardless of which model runs. -### Verify the setup - Wait for `function-openai` to become healthy: ```bash @@ -311,8 +300,6 @@ If `kubectl get functions` returns **No resources found**, `up project run [Start the project](#start-the-project). ::: -### Apply the starting state - Apply the nginx `Deployment` at 1 replica: ```bash @@ -332,20 +319,8 @@ You should see `READY: 1/1`. An nginx `Deployment` is running in the cluster with only 1 replica. 
Apply the `WatchOperation` and watch it fix that. -### See the current state - -```bash -kubectl get deployment nginx -``` - -`READY 1/1` is the starting point. - - -### Apply the WatchOperation - - -Crossplane Operations are Kubernetes objects that run logic against your cluster -on a trigger. +Crossplane Operations are Kubernetes objects that run logic against your +cluster on a trigger: | Kind | Trigger | |------|---------| @@ -353,158 +328,158 @@ on a trigger. | `CronOperation` | On a schedule | | `Operation` | Once, on demand | -This tutorial uses a `WatchOperation`. It watches the nginx `Deployment` and +This tutorial uses a `WatchOperation` that watches the nginx `Deployment` and calls an LLM every time it changes. -```bash -kubectl apply -f operations/replicas/operation.yaml -``` +1. Confirm the starting state: + + ```bash + kubectl get deployment nginx + ``` -The `WatchOperation` fires immediately because the `Deployment` already exists. + `READY 1/1` is the starting point. -### Watch it act +2. Apply the `WatchOperation`. It fires immediately because the `Deployment` + already exists: -```bash -kubectl get deployment nginx -w -``` + ```bash + kubectl apply -f operations/replicas/operation.yaml + ``` -Within 60 to 90 seconds, replicas jump from 1 to 3. The LLM read the `Deployment`, -decided it violated the rule, and patched it. +3. Watch the controller act: -Press Ctrl+C when replicas reach 3. + ```bash + kubectl get deployment nginx -w + ``` -### Inspect the operation records + Within 60 to 90 seconds, replicas jump from 1 to 3. The LLM read the + `Deployment`, decided it violated the rule, and patched it. Press Ctrl+C + when replicas reach 3. -Each `Operation` object is a record of a single invocation. +4. Inspect the operation records. 
Each `Operation` object captures a single + invocation: -```bash -kubectl get watchoperations -kubectl get operations -``` + ```bash + kubectl get watchoperations + kubectl get operations + ``` -Pick one of the operation names and describe it: +5. Describe one of the operations: -```bash -kubectl describe operation -``` + ```bash + kubectl describe operation + ``` -The `Events` section shows the exact YAML the model returned and what the -controller applied. + The `Events` section shows the exact YAML the model returned and what the + controller applied. ## Watch it heal The `WatchOperation` re-evaluates on every change. If anything modifies the `Deployment`, the rule re-applies. -### Scale down nginx - -```bash -kubectl scale deployment nginx --replicas=1 -``` +1. Scale nginx down to 1 replica: -### Watch the controller heal it + ```bash + kubectl scale deployment nginx --replicas=1 + ``` -```bash -kubectl get deployment nginx -w -``` +2. Watch the controller heal it: -Within 30 to 60 seconds, replicas climb back to 3. The `WatchOperation` fired -because the `Deployment` changed. The LLM saw 1 replica, decided it violated -the rule, and patched it. + ```bash + kubectl get deployment nginx -w + ``` -Press Ctrl+C when replicas are back at 3. + Within 30 to 60 seconds, replicas climb back to 3. The `WatchOperation` + fired because the `Deployment` changed. The LLM saw 1 replica, decided it + violated the rule, and patched it. Press Ctrl+C when replicas are back at 3. -### See what fired +3. See what fired: -```bash -kubectl get watchoperations -kubectl get operations -``` + ```bash + kubectl get watchoperations + kubectl get operations + ``` -Each entry is a record of what fired, what the model decided, and what changed. -The most recent one captured the scale-down event and the correction. + Each entry records what fired, what the model decided, and what changed. + The most recent one captured the scale-down event and the correction. 
-### See where the model runs +4. See where the model runs: -```bash -kubectl get secret gpt -n crossplane-system -o yaml -``` + ```bash + kubectl get secret gpt -n crossplane-system -o yaml + ``` -`OPENAI_BASE_URL` points to Ollama's OpenAI-compatible API running locally on -your machine, so no data leaves the machine. Change that URL to -`https://api.openai.com/v1` and update `OPENAI_MODEL`, and the -`WatchOperation` works identically. + `OPENAI_BASE_URL` points to Ollama's OpenAI-compatible API running locally + on your machine, so no data leaves the machine. Change that URL to + `https://api.openai.com/v1` and update `OPENAI_MODEL`, and the + `WatchOperation` works identically. ## Change the rules -To change the policy, edit `systemPrompt` and re-apply. - -### Update the minimum replicas to 5 - -Open `operations/replicas/operation.yaml`. Find the `systemPrompt` and change -the rule line from: - -```text -Rule: if spec.replicas is less than 3, set it to 3. Otherwise keep it unchanged. -``` +To change the policy, edit `systemPrompt` and re-apply. This example raises the +minimum from 3 to 5 replicas. -To: +1. Open `operations/replicas/operation.yaml`. Find the `systemPrompt` and + change the rule line from: -```text -Rule: if spec.replicas is less than 5, set it to 5. Otherwise keep it unchanged. -``` + ```text + Rule: if spec.replicas is less than 3, set it to 3. Otherwise keep it unchanged. + ``` -Edit the file directly: + to: -**macOS:** + ```text + Rule: if spec.replicas is less than 5, set it to 5. Otherwise keep it unchanged. + ``` -```bash -sed -i '' 's/less than 3, set it to 3/less than 5, set it to 5/' \ - operations/replicas/operation.yaml -``` + Or edit in place. 
On macOS: -**Linux:** + ```bash + sed -i '' 's/less than 3, set it to 3/less than 5, set it to 5/' \ + operations/replicas/operation.yaml + ``` -```bash -sed -i 's/less than 3, set it to 3/less than 5, set it to 5/' \ - operations/replicas/operation.yaml -``` + On Linux: -:::info -With `qwen3.5:latest`, keep the full `userPrompt` output instructions in place. -The explicit YAML template keeps the local model's output reliable. With a -larger model like `gpt-4o`, you can remove the `userPrompt` entirely and keep -only the rule in `systemPrompt`. -::: + ```bash + sed -i 's/less than 3, set it to 3/less than 5, set it to 5/' \ + operations/replicas/operation.yaml + ``` -### Apply the updated operation + :::info + With `qwen3.5:latest`, keep the full `userPrompt` output instructions in + place. The explicit YAML template keeps the local model's output reliable. + With a larger model like `gpt-4o`, you can remove the `userPrompt` entirely + and keep only the rule in `systemPrompt`. + ::: -```bash -kubectl apply -f operations/replicas/operation.yaml -``` +2. Apply the updated operation: -### Trigger and observe + ```bash + kubectl apply -f operations/replicas/operation.yaml + ``` -Scale nginx down to 1: +3. Trigger the rule by scaling nginx down to 1: -```bash -kubectl scale deployment nginx --replicas=1 -``` + ```bash + kubectl scale deployment nginx --replicas=1 + ``` -Watch the updated rule enforce 5 replicas: +4. Watch the updated rule enforce 5 replicas: -```bash -kubectl get deployment nginx -w -``` + ```bash + kubectl get deployment nginx -w + ``` -This takes 30 to 45 seconds. Press Ctrl+C when you see 5 ready replicas. + This takes 30 to 45 seconds. Press Ctrl+C when you see 5 ready replicas. -### Verify +5. 
Inspect the operation history to verify the new rule fired: -```bash -kubectl get watchoperations -kubectl get operations -``` + ```bash + kubectl get watchoperations + kubectl get operations + ``` :::tip Try adding a conditional rule to the `systemPrompt`: diff --git a/docs/getstarted/ai-database-scaling-tutorial.md b/docs/getstarted/ai-database-scaling-tutorial.md index d3623923..fb3b43b5 100644 --- a/docs/getstarted/ai-database-scaling-tutorial.md +++ b/docs/getstarted/ai-database-scaling-tutorial.md @@ -36,20 +36,17 @@ Install the following tools before starting: - [`up CLI`][up-cli] v0.44.3 or later -### Install mysqlslap +The load test later uses `mysqlslap`, which ships with the MySQL client tools. -The load test in this tutorial uses `mysqlslap`, which ships with the MySQL -client tools. - -**macOS:** +On macOS: ```shell brew install mysql-client export PATH="$(brew --prefix mysql-client)/bin:$PATH" ``` -**Linux (Debian/Ubuntu):** +On Linux (Debian/Ubuntu): ```shell apt-get install -y mysql-client @@ -99,8 +96,6 @@ Startup takes several minutes. The command exits when the cluster is ready. This message reports a telemetry timeout and doesn't affect the cluster setup. ::: -### Configure kubectl - In your second terminal, point kubectl at the new cluster. `up project run --local` names the cluster after the project directory: @@ -127,7 +122,8 @@ Verify the connection: kubectl get nodes ``` -### Create the namespace and apply credentials +Create the namespace and load AWS credentials and the Anthropic API key into +the cluster: 1. Create the `database-team` namespace: @@ -161,8 +157,6 @@ kubectl get nodes --dry-run=client -o yaml | kubectl apply -f - ``` -### Verify providers and functions - Wait for both AWS providers and both functions to become healthy: ```bash @@ -178,39 +172,41 @@ If `kubectl get providers` or `kubectl get functions` returns **No resources fou [Start the project](#start-the-project). 
::: -### Apply the ProviderConfig +Apply the `ProviderConfig`, then the network, then the database: -```bash -kubectl apply -f examples/providerconfig-aws-static.yaml -``` +1. Apply the `ProviderConfig`: -### Provision the network + ```bash + kubectl apply -f examples/providerconfig-aws-static.yaml + ``` -```bash -kubectl apply -f examples/network-rds-metrics.yaml -``` +2. Provision the network: -Wait for the network composite resource to become ready (~5 minutes): + ```bash + kubectl apply -f examples/network-rds-metrics.yaml + ``` -```bash -kubectl get network rds-metrics-database-ai-scale -n database-team -w -``` + Wait for the network composite resource to become ready (~5 minutes): -Press Ctrl+C once it shows `READY: True`. + ```bash + kubectl get network rds-metrics-database-ai-scale -n database-team -w + ``` -### Provision the database + Press Ctrl+C once it shows `READY: True`. -```bash -kubectl apply -f examples/mariadb-xr-rds-metrics.yaml -``` +3. Provision the database: -RDS provisioning takes 10 to 15 minutes. Watch the status: + ```bash + kubectl apply -f examples/mariadb-xr-rds-metrics.yaml + ``` -```bash -kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team -w -``` + RDS provisioning takes 10 to 15 minutes. Watch the status: -Press Ctrl+C once it shows `READY: True` before continuing. + ```bash + kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team -w + ``` + + Press Ctrl+C once it shows `READY: True` before continuing. :::info While you wait, the `function-rds-metrics` composition step is already @@ -218,7 +214,7 @@ collecting CloudWatch data and writing it onto the object. By the time the database is ready, `status.performanceMetrics` contains live data. ::: -### Access the UXP console +Open the UXP console for a visual view of the resources: 1. Enable the web UI: @@ -239,124 +235,119 @@ database is ready, `status.performanceMetrics` contains live data. 
An RDS MariaDB instance is running on AWS, managed by Crossplane. Before wiring the AI into the loop, explore what the system already knows. -### See the database object - -```bash -kubectl get sqlinstance -n database-team -``` - -You should see `rds-metrics-database-ai-mysql` with `READY: True`. That's a -real AWS RDS instance, managed as a Kubernetes object. +1. List the database object: -In the UXP console, click **View all Composite Resources**. The -`rds-metrics-database-ai-mysql` entry appears in the list. Click -**Relationship View** to see the resources Crossplane provisioned. + ```bash + kubectl get sqlinstance -n database-team + ``` -### Verify the AWS resource + You should see `rds-metrics-database-ai-mysql` with `READY: True`. That's a + real AWS RDS instance, managed as a Kubernetes object. -In the [AWS Console, RDS in `us-east-1`][aws-rds], find -`rds-metrics-database-ai-mysql`. + In the UXP console, click **View all Composite Resources**. The + `rds-metrics-database-ai-mysql` entry appears in the list. Click + **Relationship View** to see the resources Crossplane provisioned. -### Find the performance metrics +2. Verify the AWS resource. In the [AWS Console, RDS in `us-east-1`][aws-rds], + find `rds-metrics-database-ai-mysql`. -```bash -kubectl describe sqlinstance rds-metrics-database-ai-mysql -n database-team -``` - -Find the `status.performanceMetrics` block. This block contains live -CloudWatch data such as CPU utilization, active connections, and free storage. -`function-rds-metrics` collects this data and writes it into the object. The -AI reads only this block and never queries CloudWatch directly. +3. Find the performance metrics: -Or fetch just the metrics: + ```bash + kubectl describe sqlinstance rds-metrics-database-ai-mysql -n database-team + ``` -```bash -kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ - -o jsonpath='{.status.performanceMetrics}' | jq . -``` + Find the `status.performanceMetrics` block. 
This block contains live + CloudWatch data such as CPU utilization, active connections, and free + storage. `function-rds-metrics` collects this data and writes it into the + object. The AI reads only this block and never queries CloudWatch directly. -### Open the controller + Or fetch just the metrics: -Open `operations/rds-intelligent-scaling-cron/operation.yaml` in your editor. + ```bash + kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.status.performanceMetrics}' | jq . + ``` -That file is the entire scaling controller. The `systemPrompt` defines the -scaling logic, including thresholds, instance class progression, and cooldown. +4. Open `operations/rds-intelligent-scaling-cron/operation.yaml` in your + editor. That file is the entire scaling controller. The `systemPrompt` + defines the scaling logic, including thresholds, instance class progression, + and cooldown. -### Apply the controller +5. Apply the controller: -```bash -kubectl apply -f operations/rds-intelligent-scaling-cron/operation.yaml -``` + ```bash + kubectl apply -f operations/rds-intelligent-scaling-cron/operation.yaml + ``` -### Watch the first decision +6. Watch the first decision: -```bash -kubectl get cronoperation -``` + ```bash + kubectl get cronoperation + ``` -The `CronOperation` takes 30 to 45 seconds to start. Once it's running, watch for the first operation: + The `CronOperation` takes 30 to 45 seconds to start. Once it's running, + watch for the first operation: -```bash -kubectl get operations -w -``` + ```bash + kubectl get operations -w + ``` -Wait until an operation shows `SUCCEEDED: True`, then press Ctrl+C and describe it: + Wait until an operation shows `SUCCEEDED: True`, then press Ctrl+C and + describe it: -```bash -kubectl describe operation -``` + ```bash + kubectl describe operation + ``` -The `Events` section shows the AI's reasoning and decision. + The `Events` section shows the AI's reasoning and decision. 
-Then check the annotation written back to the database object: +7. Check the annotation written back to the database object: -```bash -kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ - -o jsonpath='{.metadata.annotations}' | jq . -``` + ```bash + kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.metadata.annotations}' | jq . + ``` -In the UXP console, navigate to `rds-metrics-database-ai-mysql` and open the -**YAML** tab. The `intelligent-scaling/last-scaled-decision` annotation -contains the model's last decision. + In the UXP console, navigate to `rds-metrics-database-ai-mysql` and open + the **YAML** tab. The `intelligent-scaling/last-scaled-decision` annotation + contains the model's last decision. ## Watch the controller idle The `CronOperation` runs every minute. CPU is low, so watch what the AI decides when there's nothing to do. -### Watch operations run +1. Watch operations run: -```bash -kubectl get operations -w -``` - -A new operation appears every minute. Press Ctrl+C after several have run. - -In the UXP console, select **Operations** in the left navigation to see the -same list visually. + ```bash + kubectl get operations -w + ``` -### Read a decision + A new operation appears every minute. Press Ctrl+C after several have run. + In the UXP console, select **Operations** in the left navigation to see the + same list visually. -Pick one of the operation names and describe it: +2. Read one of the decisions: -```bash -kubectl describe operation -``` + ```bash + kubectl describe operation + ``` -Look at the `Events` section. At low CPU, the AI decides to hold. The cooldown -logic is also in the prompt, so it doesn't flip the instance class every minute -even if usage crosses the thresholds. + Look at the `Events` section. At low CPU, the AI decides to hold. The + cooldown logic is also in the prompt, so it doesn't flip the instance class + every minute even if usage crosses the thresholds. 
-### See the current metrics +3. Look at the current metrics: -```bash -kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ - -o jsonpath='{.status.performanceMetrics}' | jq . -``` + ```bash + kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.status.performanceMetrics}' | jq . + ``` -The AI reads this same data before making a decision. + The AI reads this same data before making a decision. -### See the current instance class +4. Confirm the current instance class: ```bash kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ @@ -373,65 +364,59 @@ You can also confirm the current instance type in the [AWS Console, RDS in Run a load test that drives CPU above the scaling threshold so the AI decides to act. -### Confirm the starting instance class - -```bash -kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ - -o jsonpath='{.spec.parameters.instanceClass}' -``` - -It should be `db.t3.micro`. +1. Confirm the starting instance class: -### Run the load test + ```bash + kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.spec.parameters.instanceClass}' + ``` -In a second terminal, run the load test from inside the `demo` directory: + It should be `db.t3.micro`. -```bash -bash perf-scale-demo.sh -``` - -The script sends CPU-intensive queries to the database for 5 to 10 minutes. -If it finishes without triggering a scale, run it again. +2. In a second terminal, run the load test from inside the `demo` directory: -### Watch the metrics climb + ```bash + bash perf-scale-demo.sh + ``` -In your first terminal, watch the metrics update every 10 seconds: + The script sends CPU-intensive queries to the database for 5 to 10 minutes. + If it finishes without triggering a scale, run it again. 
-```bash -watch -n 10 "kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ - -o jsonpath='{.status.performanceMetrics.metrics}' | jq ." -``` +3. In your first terminal, watch the metrics update every 10 seconds: -### Watch the controller act + ```bash + watch -n 10 "kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.status.performanceMetrics.metrics}' | jq ." + ``` -Press Ctrl+C to exit the watch command, then: +4. Press Ctrl+C to exit the watch command, then watch the controller act: -```bash -kubectl get operations -w -``` + ```bash + kubectl get operations -w + ``` -When CPU crosses the threshold (~60%), the next `CronOperation` decides to -scale up. Press Ctrl+C once you see a new operation start. + When CPU crosses the threshold (~60%), the next `CronOperation` decides to + scale up. Press Ctrl+C once you see a new operation start. -### See the scale event +5. Check the new instance class: -Check the instance class: + ```bash + kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.spec.parameters.instanceClass}' + ``` -```bash -kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ - -o jsonpath='{.spec.parameters.instanceClass}' -``` + It should now be `db.t3.small`. -It should now be `db.t3.small`. Check the reasoning: +6. Check the reasoning: -```bash -kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ - -o jsonpath='{.metadata.annotations.intelligent-scaling/last-scaled-decision}' -``` + ```bash + kubectl get sqlinstance rds-metrics-database-ai-mysql -n database-team \ + -o jsonpath='{.metadata.annotations.intelligent-scaling/last-scaled-decision}' + ``` -In the [AWS Console, RDS in `us-east-1`][aws-rds], refresh the database list. -The instance class change is in progress, and RDS is modifying the live -database. + In the [AWS Console, RDS in `us-east-1`][aws-rds], refresh the database + list. 
The instance class change is in progress, and RDS is modifying the + live database. ## Clean up diff --git a/docs/getstarted/platform-tutorial.md b/docs/getstarted/platform-tutorial.md index d7d73461..da4e1e54 100644 --- a/docs/getstarted/platform-tutorial.md +++ b/docs/getstarted/platform-tutorial.md @@ -46,8 +46,6 @@ cd app-w-db All commands from this point run from inside the `app-w-db` directory. -### Add provider and function dependencies - The platform composes AWS resources and uses `function-auto-ready` so composite resources report ready status. Add them as project dependencies: @@ -61,8 +59,6 @@ up dependency add 'xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.6.1 `up dependency add` records each dependency in `upbound.yaml`. -### Define the platform APIs - The platform exposes two APIs: `AppWDB` (a basic app with a database) and `AppWDBSecure` (the same API with an optional security context, used later for policy enforcement). @@ -194,8 +190,6 @@ spec: EOF ``` -### Create the composition function - The composition function is a KCL program that maps the user's 10-line request to the full set of AWS resources. @@ -356,8 +350,6 @@ items = _items EOF ``` -### Create example manifests - Create the base example and the variants used in later steps: ```bash @@ -437,8 +429,6 @@ spec: EOF ``` -### Create the ProviderConfig - The `ProviderConfig` tells the AWS providers where to find credentials. ```bash @@ -493,8 +483,6 @@ Startup takes several minutes. Keep this terminal open throughout the tutorial. This message reports a telemetry timeout and doesn't affect the cluster setup. ::: -### Configure kubectl - In your second terminal, point kubectl at the new cluster: ```bash @@ -513,7 +501,7 @@ Verify the connection: kubectl get nodes ``` -### Apply AWS credentials +Apply your AWS credentials so providers can authenticate: 1. 
Create the `demo` namespace: @@ -530,8 +518,6 @@ kubectl get nodes "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY")" ``` -### Verify the setup - Check that all four providers report healthy: ```bash @@ -559,9 +545,7 @@ If this returns **No resources found**, the KCL function wasn't built or deployed. Check the `up project run` terminal and restart. ::: -### Apply the compositions - -Get the exact function name assigned by `up project run`: +Capture the function name assigned by `up project run`: ```bash FUNC_NAME=$(kubectl get functions --no-headers | grep -v 'crossplane-contrib' | awk '{print $1}') @@ -632,9 +616,7 @@ If this returns **No resources found**, stop here. Return to the `up project run` terminal to diagnose the failure. ::: -### Apply the ProviderConfig - -Apply this only after all providers are healthy: +Apply the `ProviderConfig` only after all providers are healthy: ```bash kubectl apply -f setup/config/ @@ -645,8 +627,6 @@ AWS resource provisioning takes 5 to 8 minutes for RDS. Each section of this tutorial gives you something to read while AWS works. ::: -### Access the UXP console - The UXP console provides a visual interface for browsing composite resources, viewing resource relationship graphs, and checking sync status. @@ -706,7 +686,7 @@ and `demo-01-db` under **RDS → Databases** (about 5 to 8 minutes). In the UXP console, click into `demo-01` and open the **relationship view** to see all composed resources and their sync status. -### Explore the composition +Now look at the files that produced those resources. Open `apis/appwdb/definition.yaml`. @@ -738,8 +718,6 @@ applied in `examples/appwdb/example.yaml` is a Composite Resource. Instead of giving end users raw AWS access, the platform team defines higher-level abstractions like `AppWDB`, and end users request those. -### Providers and ProviderConfigs - **Providers** are how Crossplane talks to external systems like AWS. 
Each provider is a Kubernetes controller that manages a specific service such as EC2, RDS, or IAM. In Crossplane 2.0, Crossplane composes the Kubernetes @@ -782,7 +760,7 @@ If someone changes a resource directly in AWS, Crossplane detects the difference between desired state and actual state and corrects it. Crossplane calls this drift detection. -### Trigger drift +Trigger drift by changing a VPC tag in AWS, then watch Crossplane revert it: 1. Verify the VPC reached `SYNCED: True`: @@ -816,7 +794,7 @@ calls this drift detection. The control plane detected the drift and corrected it. -### Verify recovery +Confirm the composite resource is back in sync: ```bash kubectl get appwdb demo-01 -n demo @@ -830,7 +808,7 @@ Kyverno is a policy engine that intercepts Kubernetes admission requests before they're accepted. Kyverno blocks a policy violation before Crossplane runs, so nothing reaches AWS. -### Install the policy engine +Install the Kyverno add-on and a policy that blocks privileged containers: 1. Create the Kyverno add-on manifest: @@ -939,7 +917,7 @@ nothing reaches AWS. `READY: True` means the policy is enforcing. -### Block a privileged request +Now confirm the policy blocks a privileged request and accepts a compliant one. :::warning Kyverno can only check requests for resource types whose CRDs already exist in @@ -960,9 +938,9 @@ ready yet. Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. `demo-01`, which you deployed before adding Kyverno, still has a running RDS instance. This request didn't start one. -### Apply a compliant request +Now try the same request with `privileged: false`: -1. Apply the compliant version (`privileged: false`): +1. Apply the compliant version: ```bash kubectl apply -f examples/appwdbsecure/example-2.yaml @@ -979,11 +957,10 @@ ready yet. Confirm `kubectl get xrds` shows both XRDs as `ESTABLISHED: True`. ## Change it live To change infrastructure, update the desired state. 
Crossplane figures out -what needs to change and does it. - -### Scale the database +what needs to change and does it. Try scaling the database first, then the +replicas. -1. Apply the change: +1. Scale the database by applying the larger-db variant: ```bash kubectl apply -f examples/appwdb/variant-bigger-db.yaml @@ -1004,22 +981,20 @@ what needs to change and does it. kubectl get appwdb demo-01 -n demo ``` -### Scale the replicas - -1. Apply the change: +5. Scale the app replicas by applying the more-replicas variant: ```bash kubectl apply -f examples/appwdb/variant-more-replicas.yaml ``` -2. Watch the `Deployment` scale (~30 seconds): +6. Watch the `Deployment` scale (~30 seconds): ```bash kubectl get deployment demo-01 -n demo -w \ -o custom-columns='NAME:.metadata.name,DESIRED:.spec.replicas,READY:.status.readyReplicas' ``` -3. Confirm the change: +7. Confirm the change: ```bash kubectl get appwdb demo-01 -n demo